Merge "ASoC: msm: qdsp6v2: destroy routing lock at exit" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
new file mode 100644
index 0000000..0a5c0b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt
@@ -0,0 +1,55 @@
+Qualcomm Technologies, Inc. QTI Mailbox Protocol
+
+QMP Driver
+==========
+
+Required properties:
+- compatible : should be "qcom,qmp-mbox".
+- label : the name of the remote proc this link connects to.
+- reg : The location and size of shared memory.
+	The irq register base address for triggering interrupts.
+- reg-names : "msgram" - string to identify the shared memory region.
+	"irq-reg-base" - string to identify the irq register region.
+- qcom,irq-mask : the bitmask to trigger an interrupt.
+- interrupt : the receiving interrupt line.
+- mbox-desc-offset : offset of mailbox descriptor from start of the msgram.
+- #mbox-cells: Common mailbox binding property to identify the number of cells
+		required for the mailbox specifier, should be 1.
+
+Optional properties:
+- mbox-offset : offset of the mcore mailbox from the start of the msgram. If
+			this property is not used, qmp will use the configuration
+			provided by the ucore.
+- mbox-size : size of the mcore mailbox. If this property is not used, qmp will
+			use the configuration provided by the ucore.
+
+Example:
+	qmp_aop: qcom,qmp-aop {
+		compatible = "qcom,qmp-mbox";
+		label = "aop";
+		reg = <0xc300000 0x100000>,
+			<0x1799000C 0x4>;
+		reg-names = "msgram", "irq-reg-base";
+		qcom,irq-mask = <0x1>;
+		interrupt = <0 389 1>;
+		mbox-desc-offset = <0x100>;
+		mbox-offset = <0x500>;
+		mbox-size = <0x400>;
+		#mbox-cells = <1>;
+	};
+
+Mailbox Client
+==============
+"mboxes" and the optional "mbox-names" (please see
+Documentation/devicetree/bindings/mailbox/mailbox.txt for details). Each value
+of the mboxes property should contain a phandle to the mailbox controller
+device node and second argument is the channel index. It must be 0 (qmp
+supports only one channel).The equivalent "mbox-names" property value can be
+used to give a name to the communication channel to be used by the client user.
+
+Example:
+	qmp-client {
+		compatible = "qcom,qmp-client";
+		mbox-names = "aop";
+		mboxes = <&qmp_aop 0>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
new file mode 100644
index 0000000..964fea6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -0,0 +1,476 @@
+Qualcomm Technologies, Inc. OSM Bindings
+
+Operating State Manager (OSM) is a hardware engine used by some Qualcomm
+Technologies, Inc. (QTI) SoCs to manage frequency and voltage scaling
+in hardware. OSM is capable of controlling frequency and voltage requests
+for multiple clusters through multiple OSM domains.
+
+Properties:
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: must be "qcom,clk-cpu-osm".
+
+- reg
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Addresses and sizes for the memory of the OSM controller,
+		    cluster PLL management, and APCS common register regions.
+		    Optionally, the address of the efuse registers used to
+		    determine the pwrcl or perfcl speed-bins and/or the ACD
+		    register space to initialize prior to enabling OSM.
+
+- reg-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
+		    "osm_perfcl_base", "l3_pll", "pwrcl_pll", "perfcl_pll",
+		    "l3_sequencer", "pwrcl_sequencer", "perfcl_sequencer" or
+		    "apps_itm_ctl". Optionally, "l3_efuse", "pwrcl_efuse",
+		    "perfcl_efuse".
+		    Must be specified in the same order as the corresponding
+		    addresses are specified in the reg property.
+
+- vdd-l3-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the L3 cluster.
+
+- vdd-pwrcl-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the Power cluster.
+
+- vdd-perfcl-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the Performance cluster.
+
+- interrupts
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: OSM interrupt specifier.
+
+- interrupt-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Interrupt names. This list must match up 1-to-1 with the
+		    interrupts specified in the 'interrupts' property.
+		    "pwrcl-irq" and "perfcl-irq" must be specified.
+
+- qcom,l3-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency,
+		    PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the L3 cluster.
+
+- qcom,pwrcl-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency,
+		    PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the Power cluster.
+
+- qcom,perfcl-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency,
+		    PLL override data, ACC level and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the Performance cluster.
+
+- qcom,l3-min-cpr-vc-binX
+	Usage:	    required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the L3 clock domain.
+
+- qcom,pwrcl-min-cpr-vc-binX
+	Usage:      required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the power cluster.
+
+- qcom,perfcl-min-cpr-vc-binX
+	Usage:      required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the performance cluster.
+
+- qcom,osm-no-tz
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that there is no programming
+		    of the OSM hardware performed by the secure world.
+
+- qcom,osm-pll-setup
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the PLL setup sequence
+		    must be executed for each clock domain managed by the OSM
+		    controller.
+
+- qcom,up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the DCVS up timer value in nanoseconds
+		    for each of the three clock domains managed by the OSM
+		    controller.
+
+- qcom,down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the DCVS down timer value in nanoseconds
+		    for each of the three clock domains managed by the OSM
+		    controller.
+
+- qcom,pc-override-index
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the OSM performance index to be used
+		    when each cluster enters certain low power modes.
+
+- qcom,set-ret-inactive
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if domains in retention must
+		    be treated as inactive.
+
+- qcom,enable-llm-freq-vote
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if Limits hardware frequency
+		    votes must be honored by OSM.
+
+- qcom,llm-freq-up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM frequency up timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,llm-freq-down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM frequency down timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,enable-llm-volt-vote
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if Limits hardware voltage
+		    votes must be honored by OSM.
+
+- qcom,llm-volt-up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM voltage up timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,llm-volt-down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM voltage down timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,cc-reads
+	Usage:      optional
+	Value type: <integer>
+	Definition: Defines the number of times the cycle counters must be
+		    read to determine the performance level of each clock
+		    domain.
+
+- qcom,l-val-base
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the L_VAL
+		    control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,apcs-pll-user-ctl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the PLL
+		    user control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,perfcl-apcs-apm-threshold-voltage
+	Usage:      required
+	Value type: <u32>
+	Definition: Specifies the APM threshold voltage in microvolts.  If the
+		    VDD_APCC supply voltage is above or at this level, then the
+		    APM is switched to use VDD_APCC.  If VDD_APCC is below
+		    this level, then the APM is switched to use VDD_MX.
+
+- qcom,apm-mode-ctl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the APM
+		    control register for each of the two clusters managed
+		    by the OSM controller.
+
+- qcom,apm-status-ctrl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the APM
+		    controller status register for each of the three clock
+		    domains managed by the OSM controller.
+
+- qcom,perfcl-isense-addr
+	Usage:      required
+	Value type: <u32>
+	Definition: Contains the ISENSE register address.
+
+- qcom,l3-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the L3 cluster.
+		    The array must contain exactly three elements.
+
+- qcom,pwrcl-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the Power cluster.
+		    The array must contain exactly three elements.
+
+- qcom,perfcl-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the Performance cluster.
+		    The array must contain exactly three elements.
+
+- qcom,perfcl-apcs-mem-acc-threshold-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the highest MEM ACC threshold voltage in
+		    microvolts for the Performance cluster.  This voltage is
+		    used to determine which MEM ACC setting is used for the
+		    highest frequencies.  If specified, the voltage must match
+		    the MEM ACC threshold voltage specified for the
+		    corresponding CPRh device.
+
+- qcom,apcs-cbc-addr
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the APCS_CBC_ADDR
+		    registers for all three clock domains.
+
+- qcom,apcs-ramp-ctl-addr
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the APCS_RAMP_CTL_ADDR
+		    registers for all three clock domains.
+
+- qcom,red-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the reduction FSM
+		    should be enabled.
+
+- qcom,boost-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the boost FSM should
+		    be enabled.
+
+- qcom,safe-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the safe FSM should
+		    be enabled.
+
+- qcom,ps-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the PS FSM should be
+		    enabled.
+
+- qcom,droop-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the droop FSM should
+		    be enabled.
+
+- qcom,set-c3-active
+	Usage:	    optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the cores in C3 are to
+		    be treated as active for core count calculations.
+
+- qcom,set-c2-active
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the cores in C2 are to
+		    be treated as active for core count calculations.
+
+- qcom,disable-cc-dvcs
+	Usage:	    optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if core count based DCVS is
+		    to be disabled.
+
+- qcom,apcs-pll-min-freq
+	Usage:	    required
+	Value type: <prop-encoded-array>
+	Definition: Contains the addresses of the RAILx_CLKDOMy_PLL_MIN_FREQ
+		    registers for the three clock domains.
+
+- clock-names
+	Usage:      required
+	Value type: <string>
+	Definition: Must be "aux_clk".
+
+- clocks
+	Usage:      required
+	Value type: <phandle>
+	Definition: Phandle to the aux clock device.
+
+Example:
+	clock_cpucc: qcom,cpucc@0x17d41000 {
+		compatible = "qcom,clk-cpu-osm";
+		reg = <0x17d41000 0x1400>,
+			<0x17d43000 0x1400>,
+			<0x17d45800 0x1400>,
+			<0x178d0000 0x1000>,
+			<0x178c0000 0x1000>,
+			<0x178b0000 0x1000>,
+			<0x17d42400 0x0c00>,
+			<0x17d44400 0x0c00>,
+			<0x17d46c00 0x0c00>,
+			<0x17810090 0x8>;
+		reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+			"l3_pll", "pwrcl_pll", "perfcl_pll",
+			"l3_sequencer", "pwrcl_sequencer",
+			"perfcl_sequencer", "apps_itm_ctl";
+
+		vdd-l3-supply = <&apc0_l3_vreg>;
+		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+		qcom,l3-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >;
+
+		qcom,pwrcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   748800000 0x501c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003333 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x1 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x1 14 >,
+			<  1440000000 0x403c0e4b 0x00003c3c 0x1 15 >,
+			<  1516800000 0x403c0f4f 0x00004040 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004343 0x2 17 >;
+
+		qcom,perfcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x1 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x1 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x1 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x1 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x2 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x2 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x2 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >;
+
+		qcom,l3-min-cpr-vc-bin0 = <7>;
+		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
+		qcom,perfcl-min-cpr-vc-bin0 = <7>;
+
+		qcom,up-timer =
+			<1000 1000 1000>;
+		qcom,down-timer =
+			<100000 100000 100000>;
+		qcom,pc-override-index =
+			<0 0 0>;
+		qcom,set-ret-inactive;
+		qcom,enable-llm-freq-vote;
+		qcom,llm-freq-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-freq-down-timer =
+			<327675 327675 327675>;
+		qcom,enable-llm-volt-vote;
+		qcom,llm-volt-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-volt-down-timer =
+			<327675 327675 327675>;
+		qcom,cc-reads = <10>;
+		qcom,cc-delay = <5>;
+		qcom,cc-factor = <100>;
+		qcom,osm-clk-rate = <100000000>;
+		qcom,xo-clk-rate = <19200000>;
+
+		qcom,l-val-base =
+			<0x178d0004 0x178c0004 0x178b0004>;
+		qcom,apcs-pll-user-ctl =
+			<0x178d000c 0x178c000c 0x178b000c>;
+		qcom,apcs-pll-min-freq =
+			<0x17d41094 0x17d43094 0x17d45894>;
+		qcom,apm-mode-ctl =
+			<0x0 0x0 0x17d20010>;
+		qcom,apm-status-ctrl =
+			<0x0 0x0 0x17d20000>;
+		qcom,perfcl-isense-addr = <0x17871480>;
+		qcom,l3-mem-acc-addr = <0x17990170 0x17990170 0x17990170>;
+		qcom,pwrcl-mem-acc-addr = <0x17990160 0x17990164 0x17990164>;
+		qcom,perfcl-mem-acc-addr = <0x17990168 0x1799016c 0x1799016c>;
+		qcom,cfg-gfmux-addr = <0x178d0084 0x178c0084 0x178b0084>;
+		qcom,apcs-cbc-addr = <0x178d008c 0x178c008c 0x178b008c>;
+		qcom,apcs-ramp-ctl-addr = <0x17840904 0x17840904 0x17830904>;
+
+		qcom,perfcl-apcs-apm-threshold-voltage = <800000>;
+		qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
+		qcom,boost-fsm-en;
+		qcom,safe-fsm-en;
+		qcom,ps-fsm-en;
+		qcom,droop-fsm-en;
+		qcom,osm-no-tz;
+		qcom,osm-pll-setup;
+
+		clock-names = "xo_ao";
+		clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
new file mode 100644
index 0000000..f214c58
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -0,0 +1,33 @@
+Qualcomm Technologies, Inc. Graphics Clock & Reset Controller Binding
+--------------------------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+		"qcom,gpucc-sdm845",
+		"qcom,gfxcc-sdm845"
+
+- reg : shall contain base register offset and size.
+- #clock-cells : shall contain 1.
+- #reset-cells : shall contain 1.
+- vdd_<rail>-supply : The logic rail supply.
+
+Optional properties :
+- #power-domain-cells : shall contain 1.
+
+Example:
+	clock_gfx: qcom,gfxcc@5090000 {
+		compatible = "qcom,gfxcc-sdm845";
+		reg = <0x5090000 0x9000>;
+		vdd_gfx-supply = <&pm8005_s1_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_gpucc: qcom,gpucc@5090000 {
+		compatible = "qcom,gpucc-sdm845";
+		reg = <0x5090000 0x9000>;
+		vdd_cx-supply = <&pm8998_s9_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
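+
+If the optional #power-domain-cells property is used, it is added to the same
+node. A minimal sketch, reusing the gpucc example values above:
+
+	clock_gpucc: qcom,gpucc@5090000 {
+		compatible = "qcom,gpucc-sdm845";
+		reg = <0x5090000 0x9000>;
+		vdd_cx-supply = <&pm8998_s9_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+		#power-domain-cells = <1>;
+	};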
diff --git a/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt
new file mode 100644
index 0000000..9427123
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt
@@ -0,0 +1,47 @@
+Qualcomm MSM CPUfreq device
+
+msm-cpufreq is a device that represents the list of usable CPU frequencies
+and provides a device handle for the CPUfreq driver to get the CPU and cache
+clocks.
+
+Required properties:
+- compatible:		Must be "qcom,msm-cpufreq"
+- qcom,cpufreq-table, or qcom,cpufreq-table-<X>:
+			A list of usable CPU frequencies (kHz).
+			Use "qcom,cpufreq-table" if all CPUs in the system
+			should share the same list of frequencies.
+			Use "qcom,cpufreq-table-<cpuid>" to describe
+			different CPU freq tables for different CPUs.
+			The table should be listed only for the first CPU
+			if multiple CPUs are synchronous.
+
+Optional properties:
+- clock-names:		When DT-based clock bindings are available, this
+			provides a list of CPU subsystem clocks:
+			"cpuX_clk" for every CPU that's present.
+			"l2_clk" when an async cache/CCI is present.
+
+- qcom,governor-per-policy:	This property denotes that governor tunables
+				should be associated with each cpufreq policy
+				group instead of being global.
+
+Example:
+	qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		qcom,cpufreq-table =
+			<  300000 >,
+			<  422400 >,
+			<  652800 >,
+			<  729600 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1728000 >,
+			< 1958400 >,
+			< 2265600 >;
+	};
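+
+The optional clock properties attach to the same node. A minimal sketch only;
+the clock controller phandle and specifier values below are placeholders and
+depend on the platform's clock bindings:
+
+	qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clock-names = "cpu0_clk", "cpu1_clk", "cpu2_clk",
+				"cpu3_clk", "l2_clk";
+		clocks = <&clock_cpu 0>, <&clock_cpu 1>, <&clock_cpu 2>,
+				<&clock_cpu 3>, <&clock_cpu 4>;
+		qcom,governor-per-policy;
+		qcom,cpufreq-table =
+			<  300000 >,
+			< 1497600 >;
+	};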
diff --git a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
index 01b2424..67dc991 100644
--- a/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
+++ b/Documentation/devicetree/bindings/devfreq/arm-memlat-mon.txt
@@ -10,11 +10,20 @@
 - qcom,core-dev-table:		A mapping table of core frequency to a required bandwidth vote at the
 				given core frequency.
 
+Optional properties:
+- qcom,cachemiss-ev:		The cache miss event that this monitor is supposed to measure.
+				Defaults to 0x17 if not specified.
+- qcom,inst-ev:			The instruction count event that this monitor is supposed to measure.
+				Defaults to 0x08 if not specified.
+
+
 Example:
 	qcom,arm-memlat-mon {
 		compatible = "qcom,arm-memlat-mon";
 		qcom,cpulist = <&CPU0 &CPU1>;
 		qcom,target-dev = <&memlat0>;
+		qcom,cachemiss-ev = <0x2A>;
+		qcom,inst-ev = <0x08>;
 		qcom,core-dev-table =
 			<  300000 1525>,
 			<  499200 3143>,
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index b6544961..f4b6013 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -152,6 +152,11 @@
 				baseAddr - base address of the gpu channels in the qdss stm memory region
 				size     - size of the gpu stm region
 
+- qcom,gpu-qtimer:
+				<baseAddr size>
+				baseAddr - base address of the qtimer memory region
+				size     - size of the qtimer region
+
 - qcom,tsens-name:
 				Specify the name of GPU temperature sensor. This name will be used
 				to get the temperature from the thermal driver API.
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt b/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt
index 4ef34bf..318ef30 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt
+++ b/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt
@@ -81,6 +81,10 @@
 		ACTIVE_TCS
 		CONTROL_TCS
 	- Cell #2 (Number of TCS): <u32>
+- label:
+	Usage: optional
+	Value type: <string>
+	Definition: Name for the mailbox. The name would be used in trace logs.
 
 EXAMPLE 1:
 
@@ -92,6 +96,7 @@
 
 	apps_rsc: mailbox@179e000 {
 		compatible = "qcom,tcs_drv";
+		label = "apps_rsc";
 		reg = <0x179E0000 0x10000>, <0x179E0D00 0x3000>;
 		interrupts = <0 5 0>;
 		#mbox-cells = <1>;
@@ -110,17 +115,18 @@
 Second tuple: 0xAF20000 + 0x1C00
 
 	disp_rsc: mailbox@af20000 {
-			status = "disabled";
-			compatible = "qcom,tcs-drv";
-			reg = <0xAF20000 0x10000>, <0xAF21C00 0x3000>;
-			interrupts = <0 129 0>;
-			#mbox-cells = <1>;
-			qcom,drv-id = <0>;
-			qcom,tcs-config = <SLEEP_TCS 1>,
-					<WAKE_TCS    1>,
-					<ACTIVE_TCS  0>,
-					<CONTROL_TCS 1>;
-		};
+		status = "disabled";
+		label = "disp_rsc";
+		compatible = "qcom,tcs-drv";
+		reg = <0xAF20000 0x10000>, <0xAF21C00 0x3000>;
+		interrupts = <0 129 0>;
+		#mbox-cells = <1>;
+		qcom,drv-id = <0>;
+		qcom,tcs-config = <SLEEP_TCS 1>,
+				<WAKE_TCS    1>,
+				<ACTIVE_TCS  0>,
+				<CONTROL_TCS 1>;
+	};
 
 
 CLIENT:
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index b6bc475..058dab1 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -89,6 +89,10 @@
 				entries by name.
 - cache-slices:			The tuple has phandle to llcc device as the first argument and the
 				second argument is the usecase id of the client.
+- qcom,sde-ubwc-malsize:	A u32 property to specify the default UBWC
+				minimum allowable length configuration value.
+- qcom,sde-ubwc-swizzle:	A u32 property to specify the default UBWC
+				swizzle configuration value.
 
 Subnode properties:
 - compatible:		Compatible name used in smmu v2.
@@ -126,6 +130,8 @@
 		clock-names = "iface_clk", "rot_core_clk";
 
 		qcom,mdss-highest-bank-bit = <0x2>;
+		qcom,sde-ubwc-malsize = <0>;
+		qcom,sde-ubwc-swizzle = <1>;
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "mdss_rotator";
@@ -144,7 +150,7 @@
 
 		qcom,mdss-sbuf-headroom = <20>;
 		cache-slice-names = "rotator";
-		cache-slices = <&llcc 3>;
+		cache-slices = <&llcc 4>;
 
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 521c783..c01036d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -43,6 +43,17 @@
 		    the first cell will be used to define gpio number and the
 		    second denotes the flags for this gpio
 
+- qcom,gpios-disallowed:
+	Usage: optional
+	Value type: <prop-encoded-array>
+	Definition: Array of the GPIO hardware numbers corresponding to GPIOs
+		    which the APSS processor is not allowed to configure.
+		    The hardware numbers are indexed from 1.
+		    The interrupt resources for these GPIOs must not be defined
+		    in "interrupts" and "interrupt-names" properties.
+		    GPIOs defined in this array won't be registered as pins
+		    in the pinctrl device or as GPIOs in the gpio chip.
+
 Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
 a general description of GPIO and interrupt bindings.
 
@@ -233,6 +244,7 @@
 
 		gpio-controller;
 		#gpio-cells = <2>;
+		qcom,gpios-disallowed = <1 20>;
 
 		pm8921_gpio_keys: gpio-keys {
 			volume-keys {
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
new file mode 100644
index 0000000..9638888
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -0,0 +1,422 @@
+Qualcomm Technologies, Inc. QPNP PMIC Fuel Gauge Gen3 Device
+
+QPNP PMIC FG Gen3 device provides an interface for clients to read properties
+related to the battery. Its main function is to retrieve the State of Charge
+(SOC), a percentage representing the amount of charge left in the
+battery.
+
+=======================
+Required Node Structure
+=======================
+
+FG Gen3 device must be described in two levels of device nodes.  The first
+level describes the FG Gen3 device.  The second level describes one or more
+peripherals managed by the FG Gen3 driver. All the peripheral specific parameters
+such as base address, interrupts, etc. should be under the second level nodes.
+
+====================================
+First Level Node - FG Gen3 device
+====================================
+
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: Should be "qcom,fg-gen3".
+
+- qcom,pmic-revid
+	Usage:      required
+	Value type: <phandle>
+	Definition: Should specify the phandle of PMIC revid module. This is
+		    used to identify the PMIC subtype.
+
+- io-channels
+- io-channel-names
+	Usage:      required
+	Value type: <phandle>
+	Definition: For details about IIO bindings see:
+		    Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+- qcom,rradc-base
+	Usage:      required
+	Value type: <u32>
+	Definition: Should specify the base address of RR_ADC peripheral. This
+		    is used for reading certain peripheral registers under it.
+
+- qcom,fg-cutoff-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage (in mV) where the fuel gauge will steer the SOC
+		    to be zero. For example, if the cutoff voltage is set to
+		    3400mV, the fuel gauge will try to count SOC so that the
+		    battery SOC will be 0 when the battery voltage is 3400mV.
+		    If this property is not specified, then the default value
+		    used will be 3200mV.
+
+- qcom,fg-empty-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage threshold (in mV) based on which the empty SOC
+		    interrupt will be triggered. When the empty SOC interrupt
+		    fires, battery SOC will be set to 0 and the userspace will
+		    be notified via the power supply framework. The userspace
+		    will read 0% SOC and immediately shut down. If this property
+		    is not specified, then the default value used will be
+		    2800mV.
+
+- qcom,fg-vbatt-low-thr
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage threshold (in mV) which, when set, will be used
+		    to configure the low battery voltage threshold.
+
+- qcom,fg-recharge-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: The voltage threshold (in mV) based on which the charging
+		    will be resumed once the charging is complete. If this
+		    property is not specified, then the default value will be
+		    4250mV.
+
+- qcom,fg-chg-term-current
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery current (in mA) at which the fuel gauge will issue
+		    an end of charge if the charger is configured to use the
+		    fuel gauge ADC for end of charge detection. If this
+		    property is not specified, then the default value used
+		    will be 100mA.
+
+- qcom,fg-sys-term-current
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery current (in mA) at which the fuel gauge will try to
+		    scale towards 100%. When the charge current goes above this
+		    threshold, the SOC should be at 100%. If this property is
+		    not specified, then the default value used will be -125mA.
+		    This value has to be specified as a negative value since it
+		    represents a charging current.
+
+- qcom,fg-delta-soc-thr
+	Usage:      optional
+	Value type: <u32>
+	Definition: Percentage of SOC increase upon which the delta monotonic &
+		    battery SOC interrupts will be triggered. If this property
+		    is not specified, then the default value will be 1.
+		    Possible values are in the range of 0 to 12.
+
+- qcom,fg-recharge-soc-thr
+	Usage:      optional
+	Value type: <u32>
+	Definition: Percentage of monotonic SOC upon which the charging will
+		    be resumed once the charging is complete. If this
+		    property is not specified, then the default value will be
+		    95.
+
+- qcom,fg-rsense-sel
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the source of sense resistor.
+		    Allowed values are:
+		    0 - Rsense is from Battery FET
+		    2 - Rsense is Battery FET and SMB
+		    Option 2 can be used only when a parallel charger is
+		    present. If this property is not specified, then the
+		    default value will be 2.
+
+- qcom,fg-jeita-thresholds
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: A list of integers which holds the jeita thresholds (degC)
+		    in the following order. Allowed size is 4.
+		    Element 0 - JEITA cold threshold
+		    Element 1 - JEITA cool threshold
+		    Element 2 - JEITA warm threshold
+		    Element 3 - JEITA hot threshold
+		    If these parameters are not specified, then the default
+		    values used will be 0, 5, 45, 50.
+
+- qcom,fg-esr-timer-charging
+	Usage:      optional
+	Value type: <u32>
+	Definition: Number of cycles between ESR pulses while the battery is
+		    charging.
+
+- qcom,fg-esr-timer-awake
+	Usage:      optional
+	Value type: <u32>
+	Definition: Number of cycles between ESR pulses while the system is
+		    awake and the battery is discharging.
+
+- qcom,fg-esr-timer-asleep
+	Usage:      optional
+	Value type: <u32>
+	Definition: Number of cycles between ESR pulses while the system is
+		    asleep and the battery is discharging. This option requires
+		    qcom,fg-esr-timer-awake to be defined.
+
+- qcom,cycle-counter-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Enables the cycle counter feature.
+
+- qcom,fg-force-load-profile
+	Usage:      optional
+	Value type: <empty>
+	Definition: If set, battery profile will be force loaded if the profile
+		    loaded earlier by the bootloader doesn't match the profile
+		    available in the device tree.
+
+- qcom,cl-start-capacity
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery SOC threshold to start the capacity learning.
+		    If this is not specified, then the default value used
+		    will be 15.
+
+- qcom,cl-min-temp
+	Usage:      optional
+	Value type: <u32>
+	Definition: Lower limit of battery temperature to start the capacity
+		    learning. If this is not specified, then the default value
+		    used will be 150. Unit is in decidegC.
+
+- qcom,cl-max-temp
+	Usage:      optional
+	Value type: <u32>
+	Definition: Upper limit of battery temperature to start the capacity
+		    learning. If this is not specified, then the default value
+		    used will be 450 (45C). Unit is in decidegC.
+
+- qcom,cl-max-increment
+	Usage:      optional
+	Value type: <u32>
+	Definition: Maximum capacity increment allowed per capacity learning
+		    cycle. If this is not specified, then the default value
+		    used will be 5 (0.5%). Unit is in decipercentage.
+
+- qcom,cl-max-decrement
+	Usage:      optional
+	Value type: <u32>
+	Definition: Maximum capacity decrement allowed per capacity learning
+		    cycle. If this is not specified, then the default value
+		    used will be 100 (10%). Unit is in decipercentage.
+
+- qcom,cl-min-limit
+	Usage:      optional
+	Value type: <u32>
+	Definition: Lower limit below which the capacity cannot go in a
+		    capacity learning cycle. If this is not specified, then
+		    the default value is 0. Unit is in decipercentage.
+
+- qcom,cl-max-limit
+	Usage:      optional
+	Value type: <u32>
+	Definition: Upper limit above which the capacity cannot go in a
+		    capacity learning cycle. If this is not specified, then
+		    the default value is 0. Unit is in decipercentage.
+
+- qcom,battery-thermal-coefficients
+	Usage:      optional
+	Value type: <u8>
+	Definition: Byte array of battery thermal coefficients.
+		    This should be exactly 3 bytes in length.
+
+- qcom,fg-jeita-hyst-temp
+	Usage:      optional
+	Value type: <u32>
+	Definition: Hysteresis applied to Jeita temperature comparison.
+		    Possible values are:
+			0 - No hysteresis
+			1,2,3 - Value in Celsius.
+
+- qcom,fg-batt-temp-delta
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery temperature delta interrupt threshold. Possible
+		    values are: 2, 4, 6 and 10. Unit is in Kelvin.
+
+- qcom,hold-soc-while-full
+	Usage:      optional
+	Value type: <empty>
+	Definition: A boolean property that when defined holds SOC at 100% when
+		    the battery is full.
+
+- qcom,ki-coeff-soc-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of monotonic SOC threshold values to change the ki
+		    coefficient for medium discharge current during discharge.
+		    This should be defined in the ascending order and in the
+		    range of 0-100. Array limit is set to 3.
+
+- qcom,ki-coeff-med-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of ki coefficient values for medium discharge current
+		    during discharge. These values will be applied when the
+		    monotonic SOC goes below the SOC threshold specified under
+		    qcom,ki-coeff-soc-dischg. Array limit is set to 3. This
+		    property should be specified if qcom,ki-coeff-soc-dischg
+		    is specified to make it fully functional. Value has no
+		    unit. Allowed range is 0 to 62200 in micro units.
+
+- qcom,ki-coeff-hi-dischg
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array of ki coefficient values for high discharge current
+		    during discharge. These values will be applied when the
+		    monotonic SOC goes below the SOC threshold specified under
+		    qcom,ki-coeff-soc-dischg. Array limit is set to 3. This
+		    property should be specified if qcom,ki-coeff-soc-dischg
+		    is specified to make it fully functional. Value has no
+		    unit. Allowed range is 0 to 62200 in micro units.
+
+- qcom,fg-rconn-mohms
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery connector resistance (Rconn) in milliohms. If Rconn
+		    is specified, then ESR to Rslow scaling factors will be
+		    updated to account for it, giving a more accurate ESR.
+
+- qcom,fg-esr-clamp-mohms
+	Usage:      optional
+	Value type: <u32>
+	Definition: Equivalent series resistance (ESR) in milliohms. If this
+		    is specified, then ESR will be clamped to this value when
+		    ESR is found to be dropping below this. Default value is
+		    20.
+
+- qcom,fg-esr-filter-switch-temp
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery temperature threshold below which low temperature
+		    ESR filter coefficients will be switched to normal
+		    temperature ESR filter coefficients. If this is not
+		    specified, then the default value used will be 100. Unit is
+		    in decidegC.
+
+- qcom,fg-esr-tight-filter-micro-pct
+	Usage:      optional
+	Value type: <u32>
+	Definition: Value in micro percentage for ESR tight filter. If this is
+		    not specified, then a default value of 3907 (0.39 %) will
+		    be used. Lowest possible value is 1954 (0.19 %).
+
+- qcom,fg-esr-broad-filter-micro-pct
+	Usage:      optional
+	Value type: <u32>
+	Definition: Value in micro percentage for ESR broad filter. If this is
+		    not specified, then a default value of 99610 (9.96 %) will
+		    be used. Lowest possible value is 1954 (0.19 %).
+
+- qcom,fg-esr-tight-lt-filter-micro-pct
+	Usage:      optional
+	Value type: <u32>
+	Definition: Value in micro percentage for low temperature ESR tight
+		    filter. If this is not specified, then a default value of
+		    48829 (4.88 %) will be used. Lowest possible value is 1954
+		    (0.19 %).
+
+- qcom,fg-esr-broad-lt-filter-micro-pct
+	Usage:      optional
+	Value type: <u32>
+	Definition: Value in micro percentage for low temperature ESR broad
+		    filter. If this is not specified, then a default value of
+		    148438 (14.84 %) will be used. Lowest possible value is
+		    1954 (0.19 %).
+
+- qcom,fg-auto-recharge-soc
+	Usage:      optional
+	Value type: <empty>
+	Definition: A boolean property which, when defined, configures the automatic
+		    recharge SOC threshold. If not specified, automatic
+		    recharge voltage threshold will be configured. This has
+		    to be configured in conjunction with the charger side
+		    configuration for proper functionality.
+
+- qcom,slope-limit-temp-threshold
+	Usage:      optional
+	Value type: <u32>
+	Definition: Battery temperature threshold to decide when slope limit
+		    coefficients should be applied along with charging status.
+		    Unit is in decidegC.
+
+- qcom,slope-limit-coeffs
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: A list of integers which holds the slope limit coefficients
+		    in the following order. Allowed size is 4. Possible values
+		    are from 0 to 31. Unit is in decipercentage.
+		    Element 0 - Low temperature discharging
+		    Element 1 - Low temperature charging
+		    Element 2 - High temperature discharging
+		    Element 3 - High temperature charging
+		    These coefficients have to be specified along with the
+		    property "qcom,slope-limit-temp-threshold" to make dynamic
+		    slope limit adjustment functional.
+
+==========================================================
+Second Level Nodes - Peripherals managed by FG Gen3 driver
+==========================================================
+- reg
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Addresses and sizes for the specified peripheral
+
+- interrupts
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Interrupt mapping as per the interrupt encoding
+
+- interrupt-names
+	Usage:      optional
+	Value type: <stringlist>
+	Definition: Interrupt names.  This list must match up 1-to-1 with the
+		    interrupts specified in the 'interrupts' property.
+
+========
+Example
+========
+
+pmi8998_fg: qpnp,fg {
+	compatible = "qcom,fg-gen3";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	qcom,pmic-revid = <&pmi8998_revid>;
+	io-channels = <&pmi8998_rradc 3>;
+	io-channel-names = "rradc_batt_id";
+	qcom,rradc-base = <0x4500>;
+	qcom,ki-coeff-soc-dischg = <30 60 90>;
+	qcom,ki-coeff-med-dischg = <800 1000 1400>;
+	qcom,ki-coeff-hi-dischg = <1200 1500 2100>;
+	qcom,slope-limit-temp-threshold = <100>;
+	qcom,slope-limit-coeffs = <10 11 12 13>;
+	qcom,battery-thermal-coefficients = [9d 50 ff];
+	status = "okay";
+
+	qcom,fg-batt-soc@4000 {
+		status = "okay";
+		reg = <0x4000 0x100>;
+		interrupts = <0x2 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
+			     <0x2 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
+			     <0x2 0x40 0x2 IRQ_TYPE_EDGE_BOTH>,
+			     <0x2 0x40 0x3 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "soc-update",
+				  "soc-ready",
+				  "bsoc-delta",
+				  "msoc-delta";
+
+	};
+
+	qcom,fg-batt-info@4100 {
+		status = "okay";
+		reg = <0x4100 0x100>;
+		interrupts = <0x2 0x41 0x3 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "batt-missing";
+	};
+
+	qcom,fg-memif@4400 {
+		status = "okay";
+		reg = <0x4400 0x100>;
+	};
+};
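+
+For illustration, optional first level properties can be added inside the node
+above. A minimal sketch using the documented default values; enabling the two
+boolean features here is only an assumption for the example:
+
+	/* optional properties, shown with their documented defaults */
+	qcom,fg-cutoff-voltage = <3200>;
+	qcom,fg-empty-voltage = <2800>;
+	qcom,fg-sys-term-current = <(-125)>;
+	qcom,fg-jeita-thresholds = <0 5 45 50>;
+	qcom,fg-esr-clamp-mohms = <20>;
+	qcom,cycle-counter-en;
+	qcom,hold-soc-while-full;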
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo.txt
new file mode 100644
index 0000000..96b7dd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-qnovo.txt
@@ -0,0 +1,32 @@
+QPNP Qnovo pulse engine
+
+QPNP Qnovo is a pulse charging engine which works in tandem with the QPNP SMB2
+Charger device. It configures the QPNP SMB2 charger to charge/discharge as per
+pulse characteristics.
+
+The QPNP Qnovo pulse engine has a single peripheral assigned to it.
+
+Required properties:
+- compatible:		Must be "qcom,qpnp-qnovo"
+- qcom,pmic-revid:	Should specify the phandle of PMIC
+			revid module. This is used to identify
+			the PMIC subtype.
+
+- reg:			The address for this peripheral
+- interrupts:		Specifies the interrupt associated with the peripheral.
+- interrupt-names:	Specifies the interrupt name for the peripheral. Qnovo
+			peripheral has only one interrupt "ptrain-done".
+
+Optional Properties:
+- qcom,external-rsense:		To indicate whether the platform uses external or
+				internal rsense for measuring battery current.
+
+Example:
+
+	qcom,qpnp-qnovo@1500 {
+		compatible = "qcom,qpnp-qnovo";
+		reg = <0x1500 0x100>;
+		interrupts = <0x2 0x15 0x0 IRQ_TYPE_NONE>;
+		interrupt-names = "ptrain-done";
+		qcom,pmic-revid = <&pmi8998_revid>;
+	};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
new file mode 100644
index 0000000..f4a22e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -0,0 +1,315 @@
+Qualcomm Technologies, Inc. SMB2 Charger Specific Bindings
+
+SMB2 Charger is an efficient programmable battery charger capable of charging a
+high-capacity lithium-ion battery over micro-USB or USB Type-C, with ultrafast
+charging support via Quick Charge 2.0, Quick Charge 3.0, and USB Power Delivery.
+Wireless charging features full A4WP Rezence 1.2, WPC 1.2, and PMA support.
+
+=======================
+Required Node Structure
+=======================
+
+SMB2 Charger must be described in two levels of device nodes.
+
+===============================
+First Level Node - SMB2 Charger
+===============================
+
+Charger specific properties:
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: "qcom,qpnp-smb2".
+
+- qcom,pmic-revid
+  Usage:      required
+  Value type: phandle
+  Definition: Should specify the phandle of PMI's revid module. This is used to
+		identify the PMI subtype.
+
+- qcom,batteryless-platform
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which indicates that the platform does not have a
+		battery, and therefore charging should be disabled. In
+		addition, battery properties will be faked such that the device
+		assumes normal operation.
+
+- qcom,external-vconn
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which indicates VCONN is sourced externally.
+
+- qcom,fcc-max-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum fast charge current in micro-amps.
+		If the value is not present, 1Amp is used as default.
+
+- qcom,fv-max-uv
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum float voltage in micro-volts.
+		If the value is not present, 4.35V is used as default.
+
+- qcom,usb-icl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the USB input current limit in micro-amps.
+		 If the value is not present, 1.5Amps is used as default.
+
+- qcom,usb-ocl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the OTG output current limit in micro-amps.
+		If the value is not present, 1.5Amps is used as default
+
+- qcom,dc-icl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the DC input current limit in micro-amps.
+
+- qcom,boost-threshold-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the boost current threshold in micro-amps.
+		If the value is not present, 100mA is used as default.
+
+- qcom,wipower-max-uw
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the DC input power limit in micro-watts.
+		If the value is not present, 8W is used as default.
+
+- qcom,thermal-mitigation
+  Usage:      optional
+  Value type: Array of <u32>
+  Definition: Array of fast charge current limit values for
+		different system thermal mitigation levels.
+		This should be a flat array that denotes the
+		maximum charge current in mA for each thermal
+		level.
+
+- qcom,step-soc-thresholds
+  Usage:      optional
+  Value type: Array of <u32>
+  Definition: Array of SOC threshold values, size of 4. This should be a
+		flat array that denotes the percentage ranging from 0 to 100.
+		If the array is not present, step charging is disabled.
+
+- qcom,step-current-deltas
+  Usage:      optional
+  Value type: Array of <s32>
+  Definition: Array of delta values for charging current, size of 5, with
+		FCC as base.  This should be a flat array that denotes the
+		offset of charging current in uA, from -3100000 to 3200000.
+		If the array is not present, step charging is disabled.
+
+- io-channels
+  Usage:      optional
+  Value type: List of <phandle u32>
+  Definition: List of phandle and IIO specifier pairs, one pair
+		for each IIO input to the device. Note: if the
+		IIO provider specifies '0' for #io-channel-cells,
+		then only the phandle portion of the pair will appear.
+
+- io-channel-names
+  Usage:      optional
+  Value type: List of <string>
+  Definition: List of IIO input name strings sorted in the same
+		order as the io-channels property. Consumer drivers
+		will use io-channel-names to match IIO input names
+		with IIO specifiers.
+
+- qcom,float-option
+  Usage:      optional
+  Value type: <u32>
+  Definition: Configures how the charger behaves when a float charger is
+	      detected by APSD
+	        1 - Treat as a DCP
+	        2 - Treat as a SDP
+	        3 - Disable charging
+	        4 - Suspend USB input
+
+- qcom,hvdcp-disable
+  Usage:      optional
+  Value type: <empty>
+  Definition: Specifies if hvdcp charging is to be enabled or not.
+		If this property is not specified hvdcp will be enabled.
+		If this property is specified, hvdcp 2.0 detection will still
+		happen but the adapter won't be asked to switch to a higher
+		voltage point.
+
+- qcom,chg-inhibit-threshold-mv
+  Usage:      optional
+  Value type: <u32>
+  Definition: Charge inhibit threshold in milli-volts. Charging will be
+		inhibited when the battery voltage is within this threshold
+		from Vfloat at charger insertion. If this is not specified
+		then charge inhibit will be disabled by default.
+		Allowed values are: 50, 100, 200, 300.
+
+- qcom,auto-recharge-soc
+  Usage:      optional
+  Value type: <empty>
+  Definition: Specifies if automatic recharge needs to be based off battery
+		SOC. If this property is not specified, then auto recharge will
+		be based off battery voltage. For both SOC and battery voltage,
+		charger receives the signal from FG to resume charging.
+
+- qcom,micro-usb
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which indicates that the platform only supports a
+		micro-USB port.
+
+- qcom,suspend-input-on-debug-batt
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which when present enables input suspend for
+		debug battery.
+
+=============================================
+Second Level Nodes - SMB2 Charger Peripherals
+=============================================
+
+Peripheral specific properties:
+- reg
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Address and size of the peripheral's register block.
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+  Usage:      required
+  Value type: <stringlist>
+  Definition: Interrupt names.  This list must match up 1-to-1 with the
+	      interrupts specified in the 'interrupts' property.
+
+=======
+Example
+=======
+
+pmi8998_charger: qcom,qpnp-smb2 {
+	compatible = "qcom,qpnp-smb2";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	io-channels = <&pmic_rradc 0>;
+	io-channel-names = "rradc_batt_id";
+
+	dpdm-supply = <&qusb_phy0>;
+
+	qcom,step-soc-thresholds = <60 70 80 90>;
+	qcom,step-current-deltas = <500000 250000 150000 0 (-150000)>;
+
+	qcom,chgr@1000 {
+		reg = <0x1000 0x100>;
+		interrupts =    <0x2 0x10 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x10 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x10 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x10 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x10 0x4 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "chg-error",
+					"chg-state-change",
+					"step-chg-state-change",
+					"step-chg-soc-update-fail",
+					"step-chg-soc-update-request";
+	};
+
+	qcom,otg@1100 {
+		reg = <0x1100 0x100>;
+		interrupts =    <0x2 0x11 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x11 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x11 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x11 0x3 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "otg-fail",
+					"otg-overcurrent",
+					"otg-oc-dis-sw-sts",
+					"testmode-change-detect";
+	};
+
+	qcom,bat-if@1200 {
+		reg = <0x1200 0x100>;
+		interrupts =    <0x2 0x12 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x4 IRQ_TYPE_NONE>,
+				<0x2 0x12 0x5 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "bat-temp",
+					"bat-ocp",
+					"bat-ov",
+					"bat-low",
+					"bat-therm-or-id-missing",
+					"bat-terminal-missing";
+	};
+
+	qcom,usb-chgpth@1300 {
+		reg = <0x1300 0x100>;
+		interrupts =    <0x2 0x13 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x4 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x5 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x6 IRQ_TYPE_NONE>,
+				<0x2 0x13 0x7 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "usbin-collapse",
+					"usbin-lt-3p6v",
+					"usbin-uv",
+					"usbin-ov",
+					"usbin-plugin",
+					"usbin-src-change",
+					"usbin-icl-change",
+					"type-c-change";
+	};
+
+	qcom,dc-chgpth@1400 {
+		reg = <0x1400 0x100>;
+		interrupts =    <0x2 0x14 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x4 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x5 IRQ_TYPE_NONE>,
+				<0x2 0x14 0x6 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "dcin-collapse",
+					"dcin-lt-3p6v",
+					"dcin-uv",
+					"dcin-ov",
+					"dcin-plugin",
+					"div2-en-dg",
+					"dcin-icl-change";
+	};
+
+	qcom,chgr-misc@1600 {
+		reg = <0x1600 0x100>;
+		interrupts =    <0x2 0x16 0x0 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x1 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x2 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x3 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x4 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x5 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x6 IRQ_TYPE_NONE>,
+				<0x2 0x16 0x7 IRQ_TYPE_NONE>;
+
+		interrupt-names =       "wdog-snarl",
+					"wdog-bark",
+					"aicl-fail",
+					"aicl-done",
+					"high-duty-cycle",
+					"input-current-limiting",
+					"temperature-change",
+					"switcher-power-ok";
+	};
+};
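+
+For illustration, some optional first level charger properties could be added
+inside the node above. A minimal sketch; the numeric values are the documented
+defaults and the thermal mitigation levels are placeholders only:
+
+	/* optional charger properties */
+	qcom,fcc-max-ua = <1000000>;
+	qcom,fv-max-uv = <4350000>;
+	qcom,usb-icl-ua = <1500000>;
+	qcom,float-option = <1>;
+	qcom,thermal-mitigation = <3000 1500 1000 500>;
+	qcom,auto-recharge-soc;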
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
new file mode 100644
index 0000000..c200f94
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
@@ -0,0 +1,115 @@
+Summit smb1351 battery charger
+
+SMB1351 is a single-cell battery charger. It can charge
+the battery and power the system via the USB/AC adapter input.
+
+The smb1351 interface is via I2C bus.
+
+Required Properties:
+- compatible			Must be "qcom,smb1351-charger".
+- reg				The device 7-bit I2C address.
+
+Required Properties for standalone charger:
+- regulator-name		A string used as a descriptive name for OTG regulator.
+- pinctrl-names			The state name of the pin configuration. Only
+				support "default".
+- pinctrl-0			The phandle of the pin configuration node in
+				pinctrl for smb_int_pin.
+
+Optional Properties:
+
+- interrupts			This indicates the IRQ number of the GPIO
+				connected to the STAT pin.
+- qcom,fastchg-current-max-ma 	Fast Charging current in mA. Supported range is
+				from 1000mA to 4500mA.
+- qcom,chg-autonomous-mode	This is a bool property and it indicates that the
+				charger is configured for autonomous operation and
+				does not require any software configuration.
+- qcom,disable-apsd		This is a bool property which disables automatic
+				power source detection (APSD). If this is set
+				charger detection is done by DCIN UV irq.
+- qcom,charging-disabled	This is a bool property which disables charging.
+- qcom,using-pmic-therm		This property indicates whether the thermal pin is connected to the PMIC or the SMB.
+- qcom,bms-psy-name		This is a string and it points to the bms
+				power supply name.
+- qcom,iterm-ma			Specifies the termination current to indicate end-of-charge.
+				Possible values in mA - 70, 100, 200, 300, 400, 500, 600, 700.
+- qcom,iterm-disabled		Disables the termination current feature. This is a bool
+				property.
+- qcom,float-voltage-mv	 	Float Voltage in mV - the maximum voltage up to which
+				the battery is charged. Supported range is 3500mV to 4500mV.
+- qcom,recharge-mv		Recharge threshold in mV - the offset from the float-voltage
+				at which the charger restarts charging. Possible
+				values are 50mV and 100mV.
+- qcom,recharge-disabled	Boolean value which disables the auto-recharge.
+- qcom,bms-controlled-charging	This property enables BMS to control EOC and
+				recharge. BMS and charger communicates with each
+				other via power_supply framework. This
+				property should be used with 'qcom,iterm-disabled'
+				to ensure EOC detection in charger is disabled.
+- qcom,force-hvdcp-2p0		Boolean value which forces HVDCP to work in 2.0 mode.
+- qcom,parallel-charger		Boolean value which enables the parallel charger.
+- qcom,chg-vadc			Corresponding VADC device's phandle.
+- qcom,chg-adc_tm		phandle to the corresponding ADC_TM device used to read the ADC channels.
+- qcom,batt-cold-decidegc	Cold battery temperature in decidegC.
+- qcom,batt-hot-decidegc	Hot battery temperature in decidegC.
+- qcom,batt-missing-decidegc	Battery missing temperature in decidegC. If the measured
+				temperature is higher than this threshold, the battery is
+				considered present.
+- qcom,batt-warm-decidegc:	Warm battery temperature in decidegC. After hitting this threshold,
+				"qcom,batt-warm-ma" defines the maximum charging current and
+				"qcom,batt-warm-mv" defines the maximum target voltage.
+- qcom,batt-cool-decidegc:	Cool battery temperature in decidegC. After hitting this threshold,
+				"qcom,batt-cool-ma" defines the maximum charging current and
+				"qcom,batt-cool-mv" defines the maximum target voltage.
+- qcom,batt-warm-ma:		Maximum warm battery charge current in milli-amps.
+- qcom,batt-cool-ma:		Maximum cool battery charge current in milli-amps.
+- qcom,batt-warm-mv:		Maximum warm battery target voltage in milli-volts.
+- qcom,batt-cool-mv:		Maximum cool battery target voltage in milli-volts.
+- qcom,parallel-en-pin-polarity	Specifies the polarity of the enable signal controlled
+				via a pin in a parallel-charger configuration.
+				0 - active low, 1 - active high.
+				If not specified, the default is active-low.
+- qcom,parallel-external-current-sense	If present, specifies that an external rsense is
+				used for charge current sensing.
+
+Example for standalone charger:
+
+&i2c_4 {
+	smb1351_otg_supply: smb1351-charger@57 {
+		compatible = "qcom,smb1351-charger";
+		reg = <0x57>;
+		interrupt-parent = <&msm_gpio>;
+		interrupts = <62 2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&smb_int_default>;
+		qcom,float-voltage-mv = <4350>;
+		qcom,iterm-ma = <100>;
+		qcom,recharge-mv = <100>;
+		qcom,bms-psy-name = "bms";
+		regulator-name = "smb1351_otg_vreg";
+		qcom,using-pmic-therm;
+		qcom,chg-adc_tm = <&pm8916_adc_tm>;
+		qcom,chg-vadc = <&pm8916_vadc>;
+		qcom,batt-hot-decidegc = <550>;
+		qcom,batt-cold-decidegc = <0>;
+		qcom,batt-missing-decidegc = <(-200)>;
+		qcom,batt-warm-decidegc = <500>;
+		qcom,batt-cool-decidegc = <50>;
+		qcom,batt-warm-ma = <350>;
+		qcom,batt-cool-ma = <350>;
+		qcom,batt-warm-mv = <4200>;
+		qcom,batt-cool-mv = <4200>;
+	};
+};
+
+Example for parallel charger:
+
+&i2c_11 {
+	smb1351-charger@1d {
+		compatible = "qcom,smb1351-charger";
+		reg = <0x1d>;
+		qcom,parallel-charger;
+		qcom,float-voltage-mv = <4400>;
+		qcom,recharge-mv = <100>;
+	};
+};
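+
+Example for a parallel charger with pin-controlled enable (a minimal sketch;
+the I2C bus, slave address and polarity value below are illustrative only):
+
+&i2c_11 {
+	smb1351-charger@1d {
+		compatible = "qcom,smb1351-charger";
+		reg = <0x1d>;
+		qcom,parallel-charger;
+		/* enable pin assumed active-high on this hypothetical board */
+		qcom,parallel-en-pin-polarity = <1>;
+		/* external rsense assumed for charge current sensing */
+		qcom,parallel-external-current-sense;
+		qcom,float-voltage-mv = <4400>;
+		qcom,recharge-mv = <100>;
+	};
+};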
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb135x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb135x-charger.txt
new file mode 100644
index 0000000..90527f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb135x-charger.txt
@@ -0,0 +1,101 @@
+SMB135x battery charger
+
+SMB135x is a single-cell switching mode battery charger. It can charge
+the battery and power the system via the USB and AC adapter input.
+
+The smb135x interface is via I2C bus.
+
+Required Properties:
+- compatible:			Must be "qcom,smb1356-charger", "qcom,smb1357-charger",
+				"qcom,smb1358-charger" or "qcom,smb1359-charger".
+- reg:				The device 7-bit I2C address.
+
+Optional Properties:
+
+- interrupts			This indicates the IRQ number of the GPIO
+				connected to the STAT pin.
+- qcom,bms-psy-name	 	The psy name to use for reporting battery capacity. If left
+				unspecified it uses a preprogrammed default value.
+- qcom,float-voltage-mv	 	Float Voltage in mV - the maximum voltage up to which
+				the battery is charged. Supported range is 3600mV to 4500mV.
+- qcom,charging-timeout		Maximum duration in minutes that a single charge
+				cycle may last.  Supported values are: 0, 192, 384,
+				768, and 1536.  A value of 0 means that no
+				charge cycle timeout is used and charging can
+				continue indefinitely.
+- qcom,dc-psy-type		The type of charger connected to the DC path.
+				Can be "Mains" or "Wireless".
+- qcom,dc-psy-ma		The current in mA that the DC path can support. Must be specified if
+				dc-psy-type is specified. Valid range 300mA to 2000mA.
+- qcom,charging-disabled	Set this if charging should be disabled in the build
+				by default. Useful in use cases where battery current
+				needs to be profiled even when USB is present.
+- qcom,recharge-thresh-mv	Specifies the minimum voltage drop in millivolts
+				below the float voltage that is required in
+				order to initiate a new charging cycle.
+				Supported values are: 50, 100, 200 and 300mV.
+- qcom,bmd-algo-disabled	Indicates if the battery missing detection algorithm
+				is disabled. If this property is present, the SMB uses
+				the THERM pin for battery missing detection.
+- qcom,iterm-ma			Specifies the termination current to indicate end-of-charge.
+				Possible values in mA - 50, 100, 150, 200, 250, 300, 500, 600.
+- qcom,iterm-disabled		Disables the termination current feature. This is a bool
+				property.
+- qcom,soft-vfloat-comp-disabled	Set this property when the battery is powered via an external
+					source and could go above the float voltage.  smb135x chips
+					go into unintentional reverse boost in such a situation and
+					the float voltage compensation needs to be disabled to avoid
+					that reverse boost.
+- qcom,soft-current-comp-disabled	Set this property to disable charging current compensation
+					if battery temperature exceeds soft JEITA thresholds.
+- qcom,gamma-setting			Array of gamma values for JEITA. The sequence is
+					<"Cold Hard" "Hot Hard" "Cold Soft" "Hot Soft">. A gamma value
+					indicates the ratio of the pull-up resistor and the NTC
+					resistor in the battery pack. There are 4 options referring to
+					the graphic user interface.
+- qcom,thermal-mitigation:		Array of input current limit values for different
+					system thermal mitigation levels.
+- regulator-name			A string used as a descriptive name for the OTG regulator.
+- therm-bias-supply			The supply that provides bias voltage to the battery
+					thermistor. This is useful in designs that do not use the SYSON
+					pin to bias the thermistor.
+- usb-pullup-supply			The supply regulator that acts as a pull-up for the USB data lines.
+- qcom,parallel-charger:		A flag to indicate that the charger merely assists with USB
+					charging. In this case the input current from USB is split
+					between a main charger and the smb135x to reduce the thermal
+					impact of high-current charging from the USB path.
+- qcom,inhibit-disabled:	Disables the charger-inhibit function.
+- qcom,bms-controlled-charging: This property enables BMS to control EOC and
+				recharge. The BMS and charger communicate with each
+				other via the power_supply framework. This
+				property should be used with 'qcom,iterm-disabled'
+				to ensure EOC detection in the charger is
+				disabled.
+- qcom,fastchg-ma:		Specifies the maximum fastcharge current.
+				The possible range for fastcharge current is
+				from 300mA to 3000mA.
+- qcom,id-line-not-connected:	Specifies that the smb135x charger is not monitoring the USB_ID line.
+- qcom,parallel-en-pin-polarity	Specifies the polarity of the enable signal controlled
+				via a pin in a parallel-charger configuration.
+				0 - active low, 1 - active high.
+				If not specified, the default is active-low.
+
+Example:
+	i2c@f9967000 {
+		smb1357-charger@1b {
+			compatible = "qcom,smb1357-charger";
+			reg = <0x1b>;
+			interrupt-parent = <&spmi_bus>;
+			interrupts = <0x00 0xCD 0>;
+			qcom,float-voltage-mv = <4200>;
+			qcom,iterm-ma = <100>;
+			qcom,dc-psy-type = <8>;
+			qcom,dc-psy-ma = <800>;
+			qcom,charging-disabled;
+			qcom,recharge-thresh-mv = <100>;
+			regulator-name = "smb1357-otg";
+			qcom,thermal-mitigation = <1500 700 600 325>;
+			qcom,gamma-setting = <3 2 0 2>;
+			qcom,fastchg-ma = <3000>;
+		};
+	};
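+
+Example for a parallel charger (a minimal sketch; the I2C controller, slave
+address and current values below are illustrative only):
+
+	i2c@f9968000 {
+		smb1357-charger@57 {
+			compatible = "qcom,smb1357-charger";
+			reg = <0x57>;
+			qcom,parallel-charger;
+			/* enable pin assumed active-high on this hypothetical board */
+			qcom,parallel-en-pin-polarity = <1>;
+			qcom,float-voltage-mv = <4400>;
+			qcom,fastchg-ma = <1000>;
+		};
+	};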
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
new file mode 100644
index 0000000..c8f2a5a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
@@ -0,0 +1,234 @@
+Qualcomm Technologies, Inc. SMB138X Charger Specific Bindings
+
+The SMB138X charger is an efficient, programmable battery charger capable of
+fast-charging a high-capacity lithium-ion battery over micro-USB or USB Type-C,
+with Quick Charge 2.0 and Quick Charge 3.0 support. Wireless charging offers
+full A4WP Rezence 1.2, WPC 1.2, and PMA support.
+
+=======================
+Required Node Structure
+=======================
+
+SMB138X Charger must be described in two levels of device nodes.
+
+==================================
+First Level Node - SMB138X Charger
+==================================
+
+Charger specific properties:
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: String which indicates the charging mode. Can be one of the
+	      following:
+              Standalone/Parallel Master	- "qcom,smb138x-charger"
+	      Parallel Slave			- "qcom,smb138x-parallel-slave"
+
+- qcom,pmic-revid
+  Usage:      required
+  Value type: phandle
+  Definition: Should specify the phandle of SMB's
+	revid module. This is used to identify
+	the SMB subtype.
+
+- qcom,suspend-input
+  Usage:      optional
+  Value type: <empty>
+  Definition: Boolean flag which indicates that the charger should not draw
+	      current from any of its input sources (USB, DC).
+
+- qcom,fcc-max-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum fast charge current in micro-amps.
+
+- qcom,usb-icl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the USB input current limit in micro-amps.
+
+- qcom,dc-icl-ua
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the DC input current limit in micro-amps.
+
+- qcom,charger-temp-max-mdegc
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum charger temperature in milli-degrees
+	      Celsius. If unspecified a default of 80000 will be used.
+
+- qcom,connector-temp-max-mdegc
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the maximum connector temperature in milli-degrees
+	      Celsius. If unspecified a default value of 105000 will be used.
+
+- io-channels
+  Usage:      optional
+  Value type: List of <phandle u32>
+  Definition: List of phandle and IIO specifier pairs, one pair
+		for each IIO input to the device. Note: if the
+		IIO provider specifies '0' for #io-channel-cells,
+		then only the phandle portion of the pair will appear.
+
+- io-channel-names
+  Usage:      optional
+  Value type: List of <string>
+  Definition: List of IIO input name strings sorted in the same
+		order as the io-channels property. Consumer drivers
+		will use io-channel-names to match IIO input names
+		with IIO specifiers.
+
+================================================
+Second Level Nodes - SMB138X Charger Peripherals
+================================================
+
+Peripheral specific properties:
+- reg
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Address and size of the peripheral's register block.
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+  Usage:      required
+  Value type: <stringlist>
+  Definition: Interrupt names.  This list must match up 1-to-1 with the
+	      interrupts specified in the 'interrupts' property.
+
+=======================================
+Second Level Nodes - SMB138X Regulators
+=======================================
+
+The following regulator nodes are supported:
+"qcom,smb138x-vbus"	- Regulator for enabling VBUS
+"qcom,smb138x-vconn"	- Regulator for enabling VCONN
+
+- regulator-name
+  Usage:      required
+  Value type: <string>
+  Definition: Specifies the name for this regulator.
+
+=======
+Example
+=======
+
+smb138x_charger: qcom,smb138x-charger {
+	compatible = "qcom,qpnp-smb138x-charger";
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	qcom,suspend-input;
+	dpdm-supply = <&qusb_phy0>;
+
+	qcom,chgr@1000 {
+		reg = <0x1000 0x100>;
+		interrupts =    <0x10 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x10 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x10 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x10 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x10 0x4 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "chg-error",
+					"chg-state-change",
+					"step-chg-state-change",
+					"step-chg-soc-update-fail",
+					"step-chg-soc-update-request";
+	};
+
+	qcom,otg@1100 {
+		reg = <0x1100 0x100>;
+		interrupts =    <0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "otg-fail",
+					"otg-overcurrent",
+					"otg-oc-dis-sw-sts",
+					"testmode-change-detect";
+	};
+
+	qcom,bat-if@1200 {
+		reg = <0x1200 0x100>;
+		interrupts =    <0x12 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x4 IRQ_TYPE_EDGE_BOTH>,
+				<0x12 0x5 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "bat-temp",
+					"bat-ocp",
+					"bat-ov",
+					"bat-low",
+					"bat-therm-or-id-missing",
+					"bat-terminal-missing";
+	};
+
+	qcom,usb-chgpth@1300 {
+		reg = <0x1300 0x100>;
+		interrupts =    <0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x5 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x6 IRQ_TYPE_EDGE_BOTH>,
+				<0x13 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "usbin-collapse",
+					"usbin-lt-3p6v",
+					"usbin-uv",
+					"usbin-ov",
+					"usbin-plugin",
+					"usbin-src-change",
+					"usbin-icl-change",
+					"type-c-change";
+	};
+
+	qcom,dc-chgpth@1400 {
+		reg = <0x1400 0x100>;
+		interrupts =    <0x14 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x4 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x5 IRQ_TYPE_EDGE_BOTH>,
+				<0x14 0x6 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "dcin-collapse",
+					"dcin-lt-3p6v",
+					"dcin-uv",
+					"dcin-ov",
+					"dcin-plugin",
+					"div2-en-dg",
+					"dcin-icl-change";
+	};
+
+	qcom,chgr-misc@1600 {
+		reg = <0x1600 0x100>;
+		interrupts =    <0x16 0x0 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x1 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x2 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x3 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x4 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x5 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x6 IRQ_TYPE_EDGE_BOTH>,
+				<0x16 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+		interrupt-names =       "wdog-snarl",
+					"wdog-bark",
+					"aicl-fail",
+					"aicl-done",
+					"high-duty-cycle",
+					"input-current-limiting",
+					"temperature-change",
+					"switcher-power-ok";
+	};
+};
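+
+The second-level regulator nodes described above could be added under the
+charger node as in this minimal sketch (the node placement and regulator
+names are illustrative only):
+
+	smb138x_vbus: qcom,smb138x-vbus {
+		regulator-name = "smb138x_vbus";
+	};
+
+	smb138x_vconn: qcom,smb138x-vconn {
+		regulator-name = "smb138x_vconn";
+	};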
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 88b6ea1..2cb5419 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -168,6 +168,14 @@
 			by means of sensor ID. Additional coefficients are
 			interpreted as constant offset.
 
+- thermal-governor:     Thermal governor to be used for this thermal zone.
+			Expected values are:
+			"step_wise": Use step wise governor.
+			"fair_share": Use fair share governor.
+			"user_space": Use user space governor.
+			"power_allocator": Use power allocator governor.
+  Type: string
+
 - sustainable-power:	An estimate of the sustainable power (in mW) that the
   Type: unsigned	thermal zone can dissipate at the desired
   Size: one cell	control temperature.  For reference, the
@@ -175,6 +183,11 @@
 			2000mW, while on a 10'' tablet is around
 			4500mW.
 
+- tracks-low:		Indicates that the temperature sensor tracks the low
+  Type: bool		thresholds, so the governors may mitigate to ensure
+			timing closure and avoid other low-temperature
+			operating issues.
+
 Note: The delay properties are bound to the maximum dT/dt (temperature
 derivative over time) in two situations for a thermal zone:
 (i)  - when passive cooling is activated (polling-delay-passive); and
diff --git a/Documentation/devicetree/bindings/thermal/tsens.txt b/Documentation/devicetree/bindings/thermal/tsens.txt
new file mode 100644
index 0000000..1065456
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/tsens.txt
@@ -0,0 +1,54 @@
+Qualcomm Technologies, Inc. TSENS driver
+
+The temperature sensor (TSENS) driver supports reading temperature from sensors
+across the MSM SoC. The driver defaults to supporting a 12-bit ADC.
+
+The driver uses the Thermal sysfs framework to provide thermal
+clients the ability to read from the supported on-die temperature sensors,
+set cool/warm temperature thresholds, and receive notifications
+on temperature threshold events.
+
+TSENS node
+
+Required properties:
+- compatible : should be "qcom,msm8996-tsens" for 8996 TSENS driver.
+	       should be "qcom,msm8953-tsens" for 8953 TSENS driver.
+	       should be "qcom,msm8998-tsens" for 8998 TSENS driver.
+	       should be "qcom,msmhamster-tsens" for hamster TSENS driver.
+	       should be "qcom,sdm660-tsens" for 660 TSENS driver.
+	       should be "qcom,sdm630-tsens" for 630 TSENS driver.
+	       should be "qcom,sdm845-tsens" for SDM845 TSENS driver.
+	       The compatible property is used to identify the respective controller to use
+	       for the corresponding SoC.
+- reg : offset and length of the TSENS registers with associated property in reg-names
+	as "tsens_physical" for TSENS TM physical address region.
+- reg-names : resource names used for the physical address of the TSENS
+	      registers. Should be "tsens_physical" for physical address of the TSENS.
+- interrupts : TSENS interrupt to notify Upper/Lower and Critical temperature threshold.
+- interrupt-names: Should be "tsens-upper-lower" for temperature threshold.
+		   Add "tsens-critical" for Critical temperature threshold notification
+		   in addition to "tsens-upper-lower" for 8996 TSENS since
+		   8996 supports Upper/Lower and Critical temperature threshold.
+- qcom,sensors : Total number of available Temperature sensors for TSENS.
+
+Optional properties:
+- qcom,sensor-id : If this property is present, map the TSENS sensors based on the
+		remote sensors that are enabled in HW. Ensure the mapping does not
+		exceed the number of supported sensors.
+- qcom,client-id : If this property is present, use it to identify the SW ID mapping
+		used to associate it with the controller and the physical sensor
+		mapping within the controller. The physical sensor mapping within
+		each controller is done using the qcom,sensor-id property. If the
+		property is not present, the SW ID mapping defaults to 0 through the
+		total number of supported sensors for each controller instance.
+
+Example:
+
+tsens@fc4a8000 {
+	compatible = "qcom,msm-tsens";
+	reg = <0xfc4a8000 0x2000>;
+	reg-names = "tsens_physical";
+	interrupts = <0 184 0>;
+	interrupt-names = "tsens-upper-lower";
+	qcom,sensors = <11>;
+};
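+
+Example with the optional sensor mapping properties (a minimal sketch; the
+register address, interrupt number, compatible string and ID values below are
+illustrative only):
+
+tsens1@fc4b8000 {
+	compatible = "qcom,msm8998-tsens";
+	reg = <0xfc4b8000 0x2000>;
+	reg-names = "tsens_physical";
+	interrupts = <0 184 0>;
+	interrupt-names = "tsens-upper-lower";
+	qcom,sensors = <8>;
+	/* map this controller's sensors to the HW remote sensors 1..8 */
+	qcom,sensor-id = <1 2 3 4 5 6 7 8>;
+	/* SW IDs for these sensors; defaults to 0..N-1 when absent */
+	qcom,client-id = <11 12 13 14 15 16 17 18>;
+};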
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index d20a7cb..8e5782a 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -106,8 +106,7 @@
 
 Optional properties:
  - reg-names: Additional registers corresponding with the following:
-   "tune2_efuse_addr": EFUSE based register address to read TUNE2 parameter.
-   via the QSCRATCH interface.
+   "efuse_addr": EFUSE address to read and update analog tune parameter.
    "emu_phy_base" : phy base address used for programming emulation target phy.
    "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
  - clocks: a list of phandles to the PHY clocks. Use as per
@@ -120,8 +119,8 @@
  - qcom,emu-init-seq : emulation initialization sequence with value,reg pair.
  - qcom,phy-pll-reset-seq : emulation PLL reset sequence with value,reg pair.
  - qcom,emu-dcm-reset-seq : emulation DCM reset sequence with value,reg pair.
- - qcom,tune2-efuse-bit-pos: TUNE2 parameter related start bit position with EFUSE register
- - qcom,tune2-efuse-num-bits: Number of bits based value to use for TUNE2 high nibble
+ - qcom,efuse-bit-pos: start bit position within EFUSE register
+ - qcom,efuse-num-bits: Number of bits to read from EFUSE register
  - qcom,emulation: Indicates that we are running on emulation platform.
  - qcom,hold-reset: Indicates that hold QUSB PHY into reset state.
  - qcom,phy-clk-scheme: Should be one of "cml" or "cmos" if ref_clk_addr is provided.
@@ -136,8 +135,8 @@
 		vdda18-supply = <&pm8994_l6>;
 		vdda33-supply = <&pm8994_l24>;
 		qcom,vdd-voltage-level = <1 5 7>;
-		qcom,tune2-efuse-bit-pos = <21>;
-		qcom,tune2-efuse-num-bits = <3>;
+		qcom,efuse-bit-pos = <21>;
+		qcom,efuse-num-bits = <3>;
 
 		clocks = <&clock_rpm clk_ln_bb_clk>,
 			 <&clock_gcc clk_gcc_rx2_usb1_clkref_clk>,
diff --git a/Documentation/mmc/mmc-dev-attrs.txt b/Documentation/mmc/mmc-dev-attrs.txt
index 404a0e9..379dc99 100644
--- a/Documentation/mmc/mmc-dev-attrs.txt
+++ b/Documentation/mmc/mmc-dev-attrs.txt
@@ -74,3 +74,13 @@
 	"raw_rpmb_size_mult" is a mutliple of 128kB block.
 	RPMB size in byte is calculated by using the following equation:
 	RPMB partition size = 128kB x raw_rpmb_size_mult
+
+SD/MMC/SDIO Clock Gating Attribute
+==================================
+
+Read and write access is provided to the following attribute.
+This attribute appears only if CONFIG_MMC_CLKGATE is enabled.
+
+	clkgate_delay	Tune the clock gating delay with the desired value in milliseconds.
+
+echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
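+
+For example, the current delay in milliseconds can be read back with:
+
+cat /sys/class/mmc_host/mmcX/clkgate_delay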
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index e206560..0f0fc7d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1462,11 +1462,20 @@
 	Functional default: enabled if accept_ra is enabled.
 			    disabled if accept_ra is disabled.
 
+accept_ra_rt_info_min_plen - INTEGER
+	Minimum prefix length of Route Information in RA.
+
+	Route Information w/ prefix smaller than this variable shall
+	be ignored.
+
+	Functional default: 0 if accept_ra_rtr_pref is enabled.
+			    -1 if accept_ra_rtr_pref is disabled.
+
 accept_ra_rt_info_max_plen - INTEGER
 	Maximum prefix length of Route Information in RA.
 
-	Route Information w/ prefix larger than or equal to this
-	variable shall be ignored.
+	Route Information w/ prefix larger than this variable shall
+	be ignored.
 
 	Functional default: 0 if accept_ra_rtr_pref is enabled.
 			    -1 if accept_ra_rtr_pref is disabled.
diff --git a/Makefile b/Makefile
index 71a7187..e70a1eb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 17
+SUBLEVEL = 20
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 7173ec9..8158c873 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -266,7 +266,7 @@
 		};
 
 		usb1: ohci@00400000 {
-			compatible = "atmel,sama5d2-ohci", "usb-ohci";
+			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00400000 0x100000>;
 			interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
 			clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index b4332b7..31dde8b 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -289,6 +289,22 @@
 		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
+static void sama5d3_ddr_standby(void)
+{
+	u32 lpr0;
+	u32 saved_lpr0;
+
+	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+	cpu_do_idle();
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -323,7 +339,7 @@
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-	{ .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+	{ .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
 	{ /*sentinel*/ }
 };
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d0d096e..ba0695b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1079,6 +1079,26 @@
 	  DTBs to be built by default (instead of a standalone Image.gz.)
 	  The image will built in arch/arm64/boot/Image.gz-dtb
 
+choice
+	prompt "Appended DTB Kernel Image name"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  Enabling this option will cause a specific kernel image (Image or
+	  Image.gz) to be used for final image creation.
+	  The image will be built in arch/arm64/boot/IMAGE-NAME-dtb
+
+	config IMG_GZ_DTB
+		bool "Image.gz-dtb"
+	config IMG_DTB
+		bool "Image-dtb"
+endchoice
+
+config BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME
+	string
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	default "Image.gz-dtb" if IMG_GZ_DTB
+	default "Image-dtb" if IMG_DTB
+
 config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
 	string "Default dtb names"
 	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index f7a21a6..445aeb6 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -120,7 +120,7 @@
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
 config ARCH_SDM845
-	bool "Enable Support for Qualcomm SDM845"
+	bool "Enable Support for Qualcomm Technologies Inc. SDM845"
 	depends on ARCH_QCOM
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index bba8d2c..13a64c9 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -96,7 +96,7 @@
 
 # Default target when executing plain make
 ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
-KBUILD_IMAGE	:= Image.gz-dtb
+KBUILD_IMAGE	:= $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME))
 else
 KBUILD_IMAGE	:= Image.gz
 endif
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 3eea0af..c32324f 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -9,7 +9,9 @@
 	sdm845-v2-rumi.dtb \
 	sdm845-v2-mtp.dtb \
 	sdm845-v2-cdp.dtb \
-	sdm845-qrd.dtb
+	sdm845-qrd.dtb \
+	sdm845-4k-panel-mtp.dtb \
+	sdm845-4k-panel-cdp.dtb
 
 dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
 	sdm830-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/pm8005.dtsi b/arch/arm64/boot/dts/qcom/pm8005.dtsi
index 241864f..1f8d20e 100644
--- a/arch/arm64/boot/dts/qcom/pm8005.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8005.dtsi
@@ -32,37 +32,15 @@
 			label = "pm8005_tz";
 		};
 
-		pm8005_gpios: gpios {
-			compatible = "qcom,qpnp-pin";
+		pm8005_gpios: pinctrl@c000 {
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x400>;
+			interrupts = <0x4 0xc0 0 IRQ_TYPE_NONE>,
+					<0x4 0xc1 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8005_gpio1", "pm8005_gpio2";
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pm8005-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			gpio@c200 {
-				reg = <0xc200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			gpio@c300 {
-				reg = <0xc300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
+			qcom,gpios-disallowed = <3 4>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index ed4fdde..5290f46 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -72,169 +72,41 @@
 			label = "pm8998_tz";
 		};
 
-		pm8998_gpios: gpios {
-			compatible = "qcom,qpnp-pin";
+		pm8998_gpios: pinctrl@c000 {
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0x1a00>;
+			interrupts = <0x0 0xc0 0 IRQ_TYPE_NONE>,
+					<0x0 0xc1 0 IRQ_TYPE_NONE>,
+					<0x0 0xc3 0 IRQ_TYPE_NONE>,
+					<0x0 0xc4 0 IRQ_TYPE_NONE>,
+					<0x0 0xc5 0 IRQ_TYPE_NONE>,
+					<0x0 0xc6 0 IRQ_TYPE_NONE>,
+					<0x0 0xc7 0 IRQ_TYPE_NONE>,
+					<0x0 0xc8 0 IRQ_TYPE_NONE>,
+					<0x0 0xc9 0 IRQ_TYPE_NONE>,
+					<0x0 0xca 0 IRQ_TYPE_NONE>,
+					<0x0 0xcb 0 IRQ_TYPE_NONE>,
+					<0x0 0xcc 0 IRQ_TYPE_NONE>,
+					<0x0 0xcd 0 IRQ_TYPE_NONE>,
+					<0x0 0xcf 0 IRQ_TYPE_NONE>,
+					<0x0 0xd0 0 IRQ_TYPE_NONE>,
+					<0x0 0xd1 0 IRQ_TYPE_NONE>,
+					<0x0 0xd2 0 IRQ_TYPE_NONE>,
+					<0x0 0xd4 0 IRQ_TYPE_NONE>,
+					<0x0 0xd6 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pm8998_gpio1", "pm8998_gpio2",
+					"pm8998_gpio4", "pm8998_gpio5",
+					"pm8998_gpio6", "pm8998_gpio7",
+					"pm8998_gpio8", "pm8998_gpio9",
+					"pm8998_gpio10", "pm8998_gpio11",
+					"pm8998_gpio12", "pm8998_gpio13",
+					"pm8998_gpio14", "pm8998_gpio16",
+					"pm8998_gpio17", "pm8998_gpio18",
+					"pm8998_gpio19", "pm8998_gpio21",
+					"pm8998_gpio23";
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pm8998-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			gpio@c200 {
-				reg = <0xc200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			gpio@c300 {
-				reg = <0xc300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
-
-			gpio@c400 {
-				reg = <0xc400 0x100>;
-				qcom,pin-num = <5>;
-				status = "disabled";
-			};
-
-			gpio@c500 {
-				reg = <0xc500 0x100>;
-				qcom,pin-num = <6>;
-				status = "disabled";
-			};
-
-			gpio@c600 {
-				reg = <0xc600 0x100>;
-				qcom,pin-num = <7>;
-				status = "disabled";
-			};
-
-			gpio@c700 {
-				reg = <0xc700 0x100>;
-				qcom,pin-num = <8>;
-				status = "disabled";
-			};
-
-			gpio@c800 {
-				reg = <0xc800 0x100>;
-				qcom,pin-num = <9>;
-				status = "disabled";
-			};
-
-			gpio@c900 {
-				reg = <0xc900 0x100>;
-				qcom,pin-num = <10>;
-				status = "disabled";
-			};
-
-			gpio@ca00 {
-				reg = <0xca00 0x100>;
-				qcom,pin-num = <11>;
-				status = "disabled";
-			};
-
-			gpio@cb00 {
-				reg = <0xcb00 0x100>;
-				qcom,pin-num = <12>;
-				status = "disabled";
-			};
-
-			gpio@cc00 {
-				reg = <0xcc00 0x100>;
-				qcom,pin-num = <13>;
-				status = "disabled";
-			};
-
-			gpio@cd00 {
-				reg = <0xcd00 0x100>;
-				qcom,pin-num = <14>;
-				status = "disabled";
-			};
-
-			gpio@ce00 {
-				reg = <0xce00 0x100>;
-				qcom,pin-num = <15>;
-				status = "disabled";
-			};
-
-			gpio@cf00 {
-				reg = <0xcf00 0x100>;
-				qcom,pin-num = <16>;
-				status = "disabled";
-			};
-
-			gpio@d000 {
-				reg = <0xd000 0x100>;
-				qcom,pin-num = <17>;
-				status = "disabled";
-			};
-
-			gpio@d100 {
-				reg = <0xd100 0x100>;
-				qcom,pin-num = <18>;
-				status = "disabled";
-			};
-
-			gpio@d200 {
-				reg = <0xd200 0x100>;
-				qcom,pin-num = <19>;
-				status = "disabled";
-			};
-
-			gpio@d300 {
-				reg = <0xd300 0x100>;
-				qcom,pin-num = <20>;
-				status = "disabled";
-			};
-
-			gpio@d400 {
-				reg = <0xd400 0x100>;
-				qcom,pin-num = <21>;
-				status = "disabled";
-			};
-
-			gpio@d500 {
-				reg = <0xd500 0x100>;
-				qcom,pin-num = <22>;
-				status = "disabled";
-			};
-
-			gpio@d600 {
-				reg = <0xd600 0x100>;
-				qcom,pin-num = <23>;
-				status = "disabled";
-			};
-
-			gpio@d700 {
-				reg = <0xd700 0x100>;
-				qcom,pin-num = <24>;
-				status = "disabled";
-			};
-
-			gpio@d800 {
-				reg = <0xd800 0x100>;
-				qcom,pin-num = <25>;
-				status = "disabled";
-			};
-
-			gpio@d900 {
-				reg = <0xd900 0x100>;
-				qcom,pin-num = <26>;
-				status = "disabled";
-			};
+			qcom,gpios-disallowed = <3 15 20 22 24 25 26>;
 		};
 
 		pm8998_coincell: qcom,coincell@2800 {
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 1659706..1f27b21 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -38,97 +38,29 @@
 			label = "pmi8998_tz";
 		};
 
-		pmi8998_gpios: gpios {
-			compatible = "qcom,qpnp-pin";
+		pmi8998_gpios: pinctrl@c000 {
+			compatible = "qcom,spmi-gpio";
+			reg = <0xc000 0xe00>;
+			interrupts = <0x2 0xc0 0 IRQ_TYPE_NONE>,
+					<0x2 0xc1 0 IRQ_TYPE_NONE>,
+					<0x2 0xc2 0 IRQ_TYPE_NONE>,
+					<0x2 0xc4 0 IRQ_TYPE_NONE>,
+					<0x2 0xc5 0 IRQ_TYPE_NONE>,
+					<0x2 0xc7 0 IRQ_TYPE_NONE>,
+					<0x2 0xc8 0 IRQ_TYPE_NONE>,
+					<0x2 0xc9 0 IRQ_TYPE_NONE>,
+					<0x2 0xca 0 IRQ_TYPE_NONE>,
+					<0x2 0xcb 0 IRQ_TYPE_NONE>,
+					<0x2 0xcd 0 IRQ_TYPE_NONE>;
+			interrupt-names = "pmi8998_gpio1", "pmi8998_gpio2",
+					"pmi8998_gpio3", "pmi8998_gpio5",
+					"pmi8998_gpio6", "pmi8998_gpio8",
+					"pmi8998_gpio9", "pmi8998_gpio10",
+					"pmi8998_gpio11", "pmi8998_gpio12",
+					"pmi8998_gpio14";
 			gpio-controller;
 			#gpio-cells = <2>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			label = "pmi8998-gpio";
-
-			gpio@c000 {
-				reg = <0xc000 0x100>;
-				qcom,pin-num = <1>;
-				status = "disabled";
-			};
-
-			gpio@c100 {
-				reg = <0xc100 0x100>;
-				qcom,pin-num = <2>;
-				status = "disabled";
-			};
-
-			gpio@c200 {
-				reg = <0xc200 0x100>;
-				qcom,pin-num = <3>;
-				status = "disabled";
-			};
-
-			gpio@c300 {
-				reg = <0xc300 0x100>;
-				qcom,pin-num = <4>;
-				status = "disabled";
-			};
-
-			gpio@c400 {
-				reg = <0xc400 0x100>;
-				qcom,pin-num = <5>;
-				status = "disabled";
-			};
-
-			gpio@c500 {
-				reg = <0xc500 0x100>;
-				qcom,pin-num = <6>;
-				status = "disabled";
-			};
-
-			gpio@c600 {
-				reg = <0xc600 0x100>;
-				qcom,pin-num = <7>;
-				status = "disabled";
-			};
-
-			gpio@c700 {
-				reg = <0xc700 0x100>;
-				qcom,pin-num = <8>;
-				status = "disabled";
-			};
-
-			gpio@c800 {
-				reg = <0xc800 0x100>;
-				qcom,pin-num = <9>;
-				status = "disabled";
-			};
-
-			gpio@c900 {
-				reg = <0xc900 0x100>;
-				qcom,pin-num = <10>;
-				status = "disabled";
-			};
-
-			gpio@ca00 {
-				reg = <0xca00 0x100>;
-				qcom,pin-num = <11>;
-				status = "disabled";
-			};
-
-			gpio@cb00 {
-				reg = <0xcb00 0x100>;
-				qcom,pin-num = <12>;
-				status = "disabled";
-			};
-
-			gpio@cc00 {
-				reg = <0xcc00 0x100>;
-				qcom,pin-num = <13>;
-				status = "disabled";
-			};
-
-			gpio@cd00 {
-				reg = <0xcd00 0x100>;
-				qcom,pin-num = <14>;
-				status = "disabled";
-			};
+			qcom,gpios-disallowed = <4 7 13>;
 		};
 
 		pmi8998_rradc: rradc@4500 {
@@ -372,7 +304,7 @@
 			qcom,en-ext-pfet-sc-pro;
 			qcom,pmic-revid = <&pmi8998_revid>;
 			qcom,loop-auto-gm-en;
-			status = "okay";
+			status = "disabled";
 		};
 
 		flash_led: qcom,leds@d300 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
new file mode 100644
index 0000000..d5646bf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel CDP";
+	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+	qcom,board-id = <1 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
new file mode 100644
index 0000000..d641276
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. sdm845 4K Display Panel MTP";
+	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+	qcom,board-id = <8 1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index 2d2b264..4088f3b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -34,12 +34,27 @@
 		mbox-names = "apps_rsc", "disp_rsc";
 		mboxes = <&apps_rsc 0 &disp_rsc 0>;
 
+	/*RSCs*/
+		rsc_apps: rsc-apps {
+			cell-id = <MSM_BUS_RSC_APPS>;
+			label = "apps_rsc";
+			qcom,rsc-dev;
+			qcom,req_state = <2>;
+		};
+
+		rsc_disp: rsc-disp {
+			cell-id = <MSM_BUS_RSC_DISP>;
+			label = "disp_rsc";
+			qcom,rsc-dev;
+			qcom,req_state = <3>;
+		};
+
 	/*BCMs*/
 		bcm_acv: bcm-acv {
 			cell-id = <MSM_BUS_BCM_ACV>;
 			label = "ACV";
 			qcom,bcm-name = "ACV";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -47,7 +62,7 @@
 			cell-id = <MSM_BUS_BCM_ALC>;
 			label = "ALC";
 			qcom,bcm-name = "ALC";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -55,7 +70,7 @@
 			cell-id = <MSM_BUS_BCM_MC0>;
 			label = "MC0";
 			qcom,bcm-name = "MC0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -63,7 +78,7 @@
 			cell-id = <MSM_BUS_BCM_SH0>;
 			label = "SH0";
 			qcom,bcm-name = "SH0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -71,7 +86,7 @@
 			cell-id = <MSM_BUS_BCM_MM0>;
 			label = "MM0";
 			qcom,bcm-name = "MM0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -79,7 +94,7 @@
 			cell-id = <MSM_BUS_BCM_SH1>;
 			label = "SH1";
 			qcom,bcm-name = "SH1";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -87,7 +102,7 @@
 			cell-id = <MSM_BUS_BCM_MM1>;
 			label = "MM1";
 			qcom,bcm-name = "MM1";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -95,7 +110,7 @@
 			cell-id = <MSM_BUS_BCM_SH2>;
 			label = "SH2";
 			qcom,bcm-name = "SH2";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -103,7 +118,7 @@
 			cell-id = <MSM_BUS_BCM_MM2>;
 			label = "MM2";
 			qcom,bcm-name = "MM2";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -111,7 +126,7 @@
 			cell-id = <MSM_BUS_BCM_SH3>;
 			label = "SH3";
 			qcom,bcm-name = "SH3";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -119,7 +134,7 @@
 			cell-id = <MSM_BUS_BCM_MM3>;
 			label = "MM3";
 			qcom,bcm-name = "MM3";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -127,7 +142,7 @@
 			cell-id = <MSM_BUS_BCM_SH4>;
 			label = "SH4";
 			qcom,bcm-name = "SH4";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -135,7 +150,7 @@
 			cell-id = <MSM_BUS_BCM_SH5>;
 			label = "SH5";
 			qcom,bcm-name = "SH5";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -143,7 +158,7 @@
 			cell-id = <MSM_BUS_BCM_MM5>;
 			label = "MM5";
 			qcom,bcm-name = "MM5";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -151,7 +166,7 @@
 			cell-id = <MSM_BUS_BCM_SN0>;
 			label = "SN0";
 			qcom,bcm-name = "SN0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -159,7 +174,7 @@
 			cell-id = <MSM_BUS_BCM_CE0>;
 			label = "CE0";
 			qcom,bcm-name = "CE0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -167,7 +182,7 @@
 			cell-id = <MSM_BUS_BCM_IP0>;
 			label = "IP0";
 			qcom,bcm-name = "IP0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -175,7 +190,7 @@
 			cell-id = <MSM_BUS_BCM_CN0>;
 			label = "CN0";
 			qcom,bcm-name = "CN0";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -183,7 +198,7 @@
 			cell-id = <MSM_BUS_BCM_SN1>;
 			label = "SN1";
 			qcom,bcm-name = "SN1";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -191,7 +206,7 @@
 			cell-id = <MSM_BUS_BCM_SN2>;
 			label = "SN2";
 			qcom,bcm-name = "SN2";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -199,7 +214,7 @@
 			cell-id = <MSM_BUS_BCM_SN3>;
 			label = "SN3";
 			qcom,bcm-name = "SN3";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -207,7 +222,7 @@
 			cell-id = <MSM_BUS_BCM_SN4>;
 			label = "SN4";
 			qcom,bcm-name = "SN4";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -215,7 +230,7 @@
 			cell-id = <MSM_BUS_BCM_SN5>;
 			label = "SN5";
 			qcom,bcm-name = "SN5";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -223,7 +238,7 @@
 			cell-id = <MSM_BUS_BCM_SN6>;
 			label = "SN6";
 			qcom,bcm-name = "SN6";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -231,7 +246,7 @@
 			cell-id = <MSM_BUS_BCM_SN7>;
 			label = "SN7";
 			qcom,bcm-name = "SN7";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -239,7 +254,7 @@
 			cell-id = <MSM_BUS_BCM_SN8>;
 			label = "SN8";
 			qcom,bcm-name = "SN8";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -247,7 +262,7 @@
 			cell-id = <MSM_BUS_BCM_SN9>;
 			label = "SN9";
 			qcom,bcm-name = "SN9";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -255,7 +270,7 @@
 			cell-id = <MSM_BUS_BCM_SN11>;
 			label = "SN11";
 			qcom,bcm-name = "SN11";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -263,7 +278,7 @@
 			cell-id = <MSM_BUS_BCM_SN12>;
 			label = "SN12";
 			qcom,bcm-name = "SN12";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -271,7 +286,7 @@
 			cell-id = <MSM_BUS_BCM_SN14>;
 			label = "SN14";
 			qcom,bcm-name = "SN14";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -279,7 +294,7 @@
 			cell-id = <MSM_BUS_BCM_SN15>;
 			label = "SN15";
 			qcom,bcm-name = "SN15";
-			qcom,drv-id = <2>;
+			qcom,rscs = <&rsc_apps>;
 			qcom,bcm-dev;
 		};
 
@@ -287,7 +302,7 @@
 			cell-id = <MSM_BUS_BCM_MC0_DISPLAY>;
 			label = "MC0_DISPLAY";
 			qcom,bcm-name = "MC0";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -295,7 +310,7 @@
 			cell-id = <MSM_BUS_BCM_SH0_DISPLAY>;
 			label = "SH0_DISPLAY";
 			qcom,bcm-name = "SH0";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -303,7 +318,7 @@
 			cell-id = <MSM_BUS_BCM_MM0_DISPLAY>;
 			label = "MM0_DISPLAY";
 			qcom,bcm-name = "MM0";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -311,7 +326,7 @@
 			cell-id = <MSM_BUS_BCM_MM1_DISPLAY>;
 			label = "MM1_DISPLAY";
 			qcom,bcm-name = "MM1";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -319,7 +334,7 @@
 			cell-id = <MSM_BUS_BCM_MM2_DISPLAY>;
 			label = "MM2_DISPLAY";
 			qcom,bcm-name = "MM2";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
@@ -327,7 +342,7 @@
 			cell-id = <MSM_BUS_BCM_MM3_DISPLAY>;
 			label = "MM3_DISPLAY";
 			qcom,bcm-name = "MM3";
-			qcom,drv-id = <0>;
+			qcom,rscs = <&rsc_disp>;
 			qcom,bcm-dev;
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index d47dd36..663ff3a4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -10,8 +10,61 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/gpio/gpio.h>
+
 &soc {
 	sound-tavil {
 		qcom,us-euro-gpios = <&tavil_us_euro_sw>;
 	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_home_default
+			     &key_vol_up_default
+			     &key_cam_snapshot_default
+			     &key_cam_focus_default>;
+
+		home {
+			label = "home";
+			gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <102>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_snapshot {
+			label = "cam_snapshot";
+			gpios = <&pm8998_gpios 7 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <766>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_focus {
+			label = "cam_focus";
+			gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <528>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index cfba6f4..edc1f23 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -9,3 +9,47 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+
+#include <dt-bindings/gpio/gpio.h>
+
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_vol_up_default
+			     &key_cam_snapshot_default
+			     &key_cam_focus_default>;
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_snapshot {
+			label = "cam_snapshot";
+			gpios = <&pm8998_gpios 7 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <766>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_focus {
+			label = "cam_focus";
+			gpios = <&pm8998_gpios 8 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <528>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index c5b53b8..04e3f22 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -1113,3 +1113,45 @@
 		};
 	};
 };
+
+&pm8998_gpios {
+	key_home {
+		key_home_default: key_home_default {
+			pins = "gpio5";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+
+	key_vol_up {
+		key_vol_up_default: key_vol_up_default {
+			pins = "gpio6";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+
+	key_cam_snapshot {
+		key_cam_snapshot_default: key_cam_snapshot_default {
+			pins = "gpio7";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+
+	key_cam_focus {
+		key_cam_focus_default: key_cam_focus_default {
+			pins = "gpio8";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 67dd934..ca325c0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -801,6 +801,7 @@
 			<RPMH_REGULATOR_MODE_LDO_LPM
 			 RPMH_REGULATOR_MODE_LDO_HPM>;
 		qcom,mode-threshold-currents = <0 10000>;
+		pm8998_l24-parent-supply = <&pm8998_l12>;
 		pm8998_l24: regulator-l24 {
 			regulator-name = "pm8998_l24";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 5d81487..2983358 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -23,7 +23,6 @@
 #include "dsi-panel-sharp-1080p-cmd.dtsi"
 #include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi"
 #include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
-#include "sdm845-pinctrl.dtsi"
 
 &soc {
 	dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -102,7 +101,7 @@
 
 	dsi_sharp_4k_dsc_video_display: qcom,dsi-display@0 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_sharp_4k_dsc_video";
+		label = "dsi_sharp_4k_dsc_video_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
@@ -125,7 +124,7 @@
 
 	dsi_sharp_4k_dsc_cmd_display: qcom,dsi-display@1 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_sharp_4k_dsc_cmd";
+		label = "dsi_sharp_4k_dsc_cmd_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
@@ -148,7 +147,7 @@
 
 	dsi_sharp_1080_cmd_display: qcom,dsi-display@2 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_sharp_1080_cmd";
+		label = "dsi_sharp_1080_cmd_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
@@ -171,7 +170,7 @@
 
 	dsi_dual_sharp_1080_120hz_cmd_display: qcom,dsi-display@3 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_dual_sharp_1080_120hz_cmd";
+		label = "dsi_dual_sharp_1080_120hz_cmd_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
@@ -194,7 +193,7 @@
 
 	dsi_dual_nt35597_truly_video_display: qcom,dsi-display@4 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_dual_nt35597_truly_video";
+		label = "dsi_dual_nt35597_truly_video_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
@@ -217,7 +216,7 @@
 
 	dsi_dual_nt35597_truly_cmd_display: qcom,dsi-display@5 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_dual_nt35597_truly_cmd";
+		label = "dsi_dual_nt35597_truly_cmd_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
@@ -240,7 +239,7 @@
 
 	dsi_nt35597_truly_dsc_cmd_display: qcom,dsi-display@6 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_nt35597_truly_dsc_cmd";
+		label = "dsi_nt35597_truly_dsc_cmd_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
@@ -263,7 +262,7 @@
 
 	dsi_nt35597_truly_dsc_video_display: qcom,dsi-display@7 {
 		compatible = "qcom,dsi-display";
-		label = "dsi_nt35597_truly_dsc_video";
+		label = "dsi_nt35597_truly_dsc_video_display";
 		qcom,display-type = "primary";
 
 		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index b157e04..ab4c253 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -112,6 +112,7 @@
 		qcom,sde-sspp-qseed-off = <0xa00>;
 		qcom,sde-mixer-linewidth = <2560>;
 		qcom,sde-sspp-linewidth = <2560>;
+		qcom,sde-wb-linewidth = <4096>;
 		qcom,sde-mixer-blendstages = <0xb>;
 		qcom,sde-highest-bank-bit = <0x2>;
 		qcom,sde-panic-per-pipe;
@@ -127,6 +128,8 @@
 		qcom,sde-vbif-size = <0x1040>;
 		qcom,sde-vbif-id = <0>;
 
+		qcom,sde-inline-rotator = <&mdss_rotator 0>;
+
 		qcom,sde-sspp-vig-blocks {
 			qcom,sde-vig-csc-off = <0x1a00>;
 			qcom,sde-vig-qseed-off = <0xa00>;
@@ -204,8 +207,12 @@
 		reg-names = "mdp_phys",
 			"rot_vbif_phys";
 
+		#list-cells = <1>;
+
 		qcom,mdss-rot-mode = <1>;
 		qcom,mdss-highest-bank-bit = <0x2>;
+		qcom,sde-ubwc-malsize = <1>;
+		qcom,sde-ubwc-swizzle = <1>;
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "mdss_rotator";
@@ -238,6 +245,11 @@
 		qcom,mdss-default-ot-rd-limit = <32>;
 		qcom,mdss-default-ot-wr-limit = <32>;
 
+		qcom,mdss-sbuf-headroom = <20>;
+
+		cache-slice-names = "rotator";
+		cache-slices = <&llcc 4>;
+
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
 			iommus = <&apps_smmu 0x1090>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 5399e99..7f090ad 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -19,6 +19,7 @@
 		reg = <0x0a600000 0xf8c00>,
 		      <0x088ee000 0x400>;
 		reg-names = "core_base", "ahb2phy_base";
+		iommus = <&apps_smmu 0x740>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
@@ -56,6 +57,7 @@
 			interrupts = <0 133 0>;
 			usb-phy = <&qusb_phy0>, <&usb_nop_phy>;
 			tx-fifo-resize;
+			linux,sysdev_is_parent;
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
@@ -109,6 +111,7 @@
 		reg = <0x0a800000 0xf8c00>,
 		      <0x088ee000 0x400>;
 		reg-names = "core_base", "ahb2phy_base";
+		iommus = <&apps_smmu 0x760>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
@@ -144,6 +147,7 @@
 			interrupts = <0 138 0>;
 			usb-phy = <&qusb_phy1>, <&usb_qmp_phy>;
 			tx-fifo-resize;
+			linux,sysdev_is_parent;
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index b2382d1..d22b972 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -500,6 +500,212 @@
 		cell-index = <0>;
 	};
 
+	msm_cpufreq: qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clock-names = "cpu0_clk", "cpu4_clk";
+		clocks = <&clock_cpucc CPU0_PWRCL_CLK>,
+			 <&clock_cpucc CPU4_PERFCL_CLK>;
+
+		qcom,governor-per-policy;
+
+		qcom,cpufreq-table-0 =
+			<  300000 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  748800 >,
+			<  825600 >,
+			<  902400 >,
+			<  979200 >,
+			< 1056000 >,
+			< 1132800 >,
+			< 1209600 >,
+			< 1286400 >,
+			< 1363200 >,
+			< 1440000 >,
+			< 1516800 >,
+			< 1593600 >;
+
+		qcom,cpufreq-table-4 =
+			<  300000 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  729600 >,
+			<  806400 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1113600 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1344000 >,
+			< 1420800 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1651200 >,
+			< 1728000 >,
+			< 1804800 >,
+			< 1881600 >,
+			< 1958400 >;
+	};
+
+	cpubw: qcom,cpubw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  762 /*  200 MHz */ >,
+			< 1144 /*  300 MHz */ >,
+			< 1720 /*  451 MHz */ >,
+			< 2086 /*  547 MHz */ >,
+			< 2597 /*  681 MHz */ >,
+			< 2929 /*  768 MHz */ >,
+			< 3879 /* 1017 MHz */ >,
+			< 4943 /* 1296 MHz */ >,
+			< 5931 /* 1555 MHz */ >,
+			< 6881 /* 1804 MHz */ >;
+	};
+
+	bwmon: qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon4";
+		reg = <0x1436400 0x300>, <0x1436300 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 581 4>;
+		qcom,mport = <0>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&cpubw>;
+	};
+
+	memlat_cpu0: qcom,memlat-cpu0 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  762 /*  200 MHz */ >,
+			< 1144 /*  300 MHz */ >,
+			< 1720 /*  451 MHz */ >,
+			< 2086 /*  547 MHz */ >,
+			< 2597 /*  681 MHz */ >,
+			< 2929 /*  768 MHz */ >,
+			< 3879 /* 1017 MHz */ >,
+			< 4943 /* 1296 MHz */ >,
+			< 5931 /* 1555 MHz */ >,
+			< 6881 /* 1804 MHz */ >;
+	};
+
+	memlat_cpu4: qcom,memlat-cpu4 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		status = "ok";
+		qcom,bw-tbl =
+			<  762 /*  200 MHz */ >,
+			< 1144 /*  300 MHz */ >,
+			< 1720 /*  451 MHz */ >,
+			< 2086 /*  547 MHz */ >,
+			< 2597 /*  681 MHz */ >,
+			< 2929 /*  768 MHz */ >,
+			< 3879 /* 1017 MHz */ >,
+			< 4943 /* 1296 MHz */ >,
+			< 5931 /* 1555 MHz */ >,
+			< 6881 /* 1804 MHz */ >;
+	};
+
+	devfreq_memlat_0: qcom,cpu0-memlat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&memlat_cpu0>;
+		qcom,cachemiss-ev = <0x2A>;
+		qcom,core-dev-table =
+			<  300000  762 >,
+			<  748800 1720 >,
+			<  979200 2929 >,
+			< 1209600 3879 >,
+			< 1516800 4943 >,
+			< 1593600 5931 >;
+	};
+
+	devfreq_memlat_4: qcom,cpu4-memlat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&memlat_cpu4>;
+		qcom,cachemiss-ev = <0x2A>;
+		qcom,core-dev-table =
+			<  300000  762 >,
+			< 1036800 2929 >,
+			< 1190400 3879 >,
+			< 1574400 4943 >,
+			< 1804800 5931 >,
+			< 1958400 6881 >;
+	};
+
+	l3_cpu0: qcom,l3-cpu0 {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
+		governor = "performance";
+		freq-tbl-khz =
+			< 300000 >,
+			< 422400 >,
+			< 499200 >,
+			< 576000 >,
+			< 652800 >,
+			< 729600 >,
+			< 806400 >,
+			< 883200 >,
+			< 960000 >;
+	};
+
+	l3_cpu4: qcom,l3-cpu4 {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
+		governor = "performance";
+		freq-tbl-khz =
+			< 300000 >,
+			< 422400 >,
+			< 499200 >,
+			< 576000 >,
+			< 652800 >,
+			< 729600 >,
+			< 806400 >,
+			< 883200 >,
+			< 960000 >;
+	};
+
+	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&l3_cpu0>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			<  300000 300000 >,
+			<  748800 576000 >,
+			<  979200 652800 >,
+			< 1209600 806400 >,
+			< 1516800 883200 >,
+			< 1593600 960000 >;
+	};
+
+	devfreq_l3lat_4: qcom,cpu4-l3lat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&l3_cpu4>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			<  300000 300000 >,
+			< 1036800 652800 >,
+			< 1190400 806400 >,
+			< 1574400 883200 >,
+			< 1651200 960000 >;
+	};
+
 	clock_gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-sdm845";
 		reg = <0x100000 0x1f0000>;
@@ -1619,6 +1825,11 @@
 		      <0x10ae000 0x2000>;
 		reg-names = "dcc-base", "dcc-ram-base";
 	};
+
+	qcom,msm-core@780000 {
+		compatible = "qcom,apss-core-ea";
+		reg = <0x780000 0x1000>;
+	};
 };
 
 &pcie_0_gdsc {
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 1be0423..8207c51 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -17,6 +17,7 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
@@ -74,7 +75,10 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -276,16 +280,21 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_SDM845=y
 CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -309,6 +318,7 @@
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
 CONFIG_DRM=y
 CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -427,6 +437,7 @@
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
@@ -467,6 +478,7 @@
 CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_DEBUG_ALIGN_RODATA=y
 CONFIG_CORESIGHT=y
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index b825da9..54f2d53 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -23,6 +23,7 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
@@ -80,7 +81,10 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -288,16 +292,21 @@
 CONFIG_SPMI=y
 CONFIG_PINCTRL_SDM845=y
 CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
 CONFIG_POWER_RESET_QCOM=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_FG_GEN3=y
+CONFIG_QPNP_SMB2=y
+CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -322,6 +331,7 @@
 CONFIG_QCOM_KGSL=y
 CONFIG_DRM=y
 CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -451,6 +461,7 @@
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
+CONFIG_APSS_CORE_EA=y
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
@@ -531,6 +542,7 @@
 CONFIG_FUNCTION_TRACER=y
 CONFIG_TRACER_SNAPSHOT=y
 CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
 CONFIG_LKDTM=y
 CONFIG_MEMTEST=y
 CONFIG_ARM64_PTDUMP=y
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index d1472eb..4ad25a5 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -47,9 +47,6 @@
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	mm_segment_t		addr_limit;	/* address limit */
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	u64			ttbr0;		/* saved TTBR0_EL1 */
-#endif
 	struct task_struct	*task;		/* main task structure */
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	u64			ttbr0;		/* saved TTBR0_EL1 */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7d66bba..2c03b01 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -33,7 +33,8 @@
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
 arm64-obj-$(CONFIG_ARM64_MODULE_PLTS)	+= module-plts.o
 arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
-arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
+arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_trace_counters.o   \
+					   perf_trace_user.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
 arm64-obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24e..d7e90d9 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@
 	/*
 	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
 	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image.
+	 * happens, increase the KASLR offset by the size of the kernel image
+	 * rounded up to a multiple of SWAPPER_BLOCK_SIZE.
 	 */
 	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
-		offset = (offset + (u64)(_end - _text)) & mask;
+	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+		u64 kimg_sz = _end - _text;
+		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+				& mask;
+	}
 
 	if (IS_ENABLED(CONFIG_KASAN))
 		/*
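
A minimal arithmetic sketch of the adjustment introduced in the kaslr.c hunk above (illustrative only, not part of the patch; the block size below is a stand-in for SWAPPER_BLOCK_SIZE, whose real value depends on the page-table granule): the image size is rounded up to a whole block before it is added to the offset, so the bumped offset keeps the image block-aligned. The rounding matches what round_up() produces for a power-of-two block size.

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SZ 0x200000ULL	/* stand-in block size (2 MiB), for illustration only */

int main(void)
{
	uint64_t kimg_sz = 0x1e03000ULL;	/* hypothetical kernel image size */
	/* Round up to the next multiple of BLOCK_SZ (power-of-two rounding). */
	uint64_t rounded = (kimg_sz + BLOCK_SZ - 1) & ~(BLOCK_SZ - 1);

	printf("rounded image size = %#llx\n", (unsigned long long)rounded);	/* prints 0x2000000 */
	return 0;
}
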
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 6d47969..2f8d275 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -840,12 +840,10 @@
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
 
-	/* Always place a cycle counter into the cycle counter. */
+	/* Place the first cycle counter request into the cycle counter. */
 	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
-		if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
-			return -EAGAIN;
-
-		return ARMV8_IDX_CYCLE_COUNTER;
+		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
+			return ARMV8_IDX_CYCLE_COUNTER;
 	}
 
 	/*
diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c
new file mode 100644
index 0000000..1f0b74a
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_counters.c
@@ -0,0 +1,177 @@
+/* Copyright (c) 2013-2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cpu.h>
+#include <linux/tracepoint.h>
+#include <trace/events/sched.h>
+#define CREATE_TRACE_POINTS
+#include "perf_trace_counters.h"
+
+static unsigned int tp_pid_state;
+
+DEFINE_PER_CPU(u32, cntenset_val);
+DEFINE_PER_CPU(u32, previous_ccnt);
+DEFINE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+DEFINE_PER_CPU(u32, old_pid);
+DEFINE_PER_CPU(u32, hotplug_flag);
+
+#define USE_CPUHP_STATE CPUHP_AP_ONLINE
+
+static int tracectr_cpu_hotplug_coming_up(unsigned int cpu)
+{
+	per_cpu(hotplug_flag, cpu) = 1;
+
+	return 0;
+}
+
+static void setup_prev_cnts(u32 cpu, u32 cnten_val)
+{
+	int i;
+
+	if (cnten_val & CC)
+		per_cpu(previous_ccnt, cpu) =
+			read_sysreg(pmccntr_el0);
+
+	for (i = 0; i < NUM_L1_CTRS; i++) {
+		if (cnten_val & (1 << i)) {
+			/* Select */
+			write_sysreg(i, pmselr_el0);
+			isb();
+			/* Read value */
+			per_cpu(previous_l1_cnts[i], cpu) =
+				read_sysreg(pmxevcntr_el0);
+		}
+	}
+}
+
+void tracectr_notifier(void *ignore, bool preempt,
+			struct task_struct *prev, struct task_struct *next)
+{
+	u32 cnten_val;
+	int current_pid;
+	u32 cpu = task_thread_info(next)->cpu;
+
+	if (tp_pid_state != 1)
+		return;
+	current_pid = next->pid;
+	if (per_cpu(old_pid, cpu) != -1) {
+		cnten_val = read_sysreg(pmcntenset_el0);
+		per_cpu(cntenset_val, cpu) = cnten_val;
+		/* Disable all the counters that were enabled */
+		write_sysreg(cnten_val, pmcntenclr_el0);
+
+		if (per_cpu(hotplug_flag, cpu) == 1) {
+			per_cpu(hotplug_flag, cpu) = 0;
+			setup_prev_cnts(cpu, cnten_val);
+		} else {
+			trace_sched_switch_with_ctrs(per_cpu(old_pid, cpu),
+						     current_pid);
+		}
+
+		/* Enable all the counters that were disabled */
+		write_sysreg(cnten_val, pmcntenset_el0);
+	}
+	per_cpu(old_pid, cpu) = current_pid;
+}
+
+static void enable_tp_pid(void)
+{
+	if (tp_pid_state == 0) {
+		tp_pid_state = 1;
+		register_trace_sched_switch(tracectr_notifier, NULL);
+	}
+}
+
+static void disable_tp_pid(void)
+{
+	if (tp_pid_state == 1) {
+		tp_pid_state = 0;
+		unregister_trace_sched_switch(tracectr_notifier, NULL);
+	}
+}
+
+static ssize_t read_enabled_perftp_file_bool(struct file *file,
+		char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[2];
+
+	buf[1] = '\n';
+	if (tp_pid_state == 0)
+		buf[0] = '0';
+	else
+		buf[0] = '1';
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_perftp_file_bool(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[32];
+	size_t buf_size;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	switch (buf[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		enable_tp_pid();
+		break;
+	case 'n':
+	case 'N':
+	case '0':
+		disable_tp_pid();
+		break;
+	}
+
+	return count;
+}
+
+static const struct file_operations fops_perftp = {
+	.read =		read_enabled_perftp_file_bool,
+	.write =	write_enabled_perftp_file_bool,
+	.llseek =	default_llseek,
+};
+
+int __init init_tracecounters(void)
+{
+	struct dentry *dir;
+	struct dentry *file;
+	unsigned int value = 1;
+	int cpu, rc;
+
+	dir = debugfs_create_dir("perf_debug_tp", NULL);
+	if (!dir)
+		return -ENOMEM;
+	file = debugfs_create_file("enabled", 0660, dir,
+		&value, &fops_perftp);
+	if (!file) {
+		debugfs_remove(dir);
+		return -ENOMEM;
+	}
+	for_each_possible_cpu(cpu)
+		per_cpu(old_pid, cpu) = -1;
+	rc = cpuhp_setup_state_nocalls(USE_CPUHP_STATE,
+		"tracectr_cpu_hotplug",
+		tracectr_cpu_hotplug_coming_up,
+		NULL);
+	return rc;
+}
+
+int __exit exit_tracecounters(void)
+{
+	cpuhp_remove_state_nocalls(USE_CPUHP_STATE);
+	return 0;
+}
+late_initcall(init_tracecounters);
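
A usage sketch for the debugfs knob created above (illustrative only, not part of the patch; it assumes debugfs is mounted at the conventional /sys/kernel/debug): writing '1', 'y' or 'Y' registers the sched_switch probe, after which every context switch emits a sched_switch_with_ctrs event carrying per-counter deltas, while '0', 'n' or 'N' unregisters it.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumes the usual debugfs mount point. */
	int fd = open("/sys/kernel/debug/perf_debug_tp/enabled", O_WRONLY);

	if (fd < 0) {
		perror("open perf_debug_tp/enabled");
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* enable; write "0" to disable again */
		perror("write");
	close(fd);
	return 0;
}
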
diff --git a/arch/arm64/kernel/perf_trace_counters.h b/arch/arm64/kernel/perf_trace_counters.h
new file mode 100644
index 0000000..660f6ce
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_counters.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2013-2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#if !defined(_PERF_TRACE_COUNTERS_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_COUNTERS_H_
+
+/* Ctr index for PMCNTENSET/CLR */
+#define CC 0x80000000
+#define C0 0x1
+#define C1 0x2
+#define C2 0x4
+#define C3 0x8
+#define C4 0x10
+#define C5 0x20
+#define C_ALL (CC | C0 | C1 | C2 | C3 | C4 | C5)
+#define NUM_L1_CTRS 6
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/tracepoint.h>
+
+DECLARE_PER_CPU(u32, cntenset_val);
+DECLARE_PER_CPU(u32, previous_ccnt);
+DECLARE_PER_CPU(u32[NUM_L1_CTRS], previous_l1_cnts);
+TRACE_EVENT(sched_switch_with_ctrs,
+
+		TP_PROTO(pid_t prev, pid_t next),
+
+		TP_ARGS(prev, next),
+
+		TP_STRUCT__entry(
+			__field(pid_t,	old_pid)
+			__field(pid_t,	new_pid)
+			__field(u32, cctr)
+			__field(u32, ctr0)
+			__field(u32, ctr1)
+			__field(u32, ctr2)
+			__field(u32, ctr3)
+			__field(u32, ctr4)
+			__field(u32, ctr5)
+		),
+
+		TP_fast_assign(
+			u32 cpu = smp_processor_id();
+			u32 i;
+			u32 cnten_val;
+			u32 total_ccnt = 0;
+			u32 total_cnt = 0;
+			u32 delta_l1_cnts[NUM_L1_CTRS];
+
+			__entry->old_pid	= prev;
+			__entry->new_pid	= next;
+
+			cnten_val = per_cpu(cntenset_val, cpu);
+
+			if (cnten_val & CC) {
+				/* Read value */
+				total_ccnt = read_sysreg(pmccntr_el0);
+				__entry->cctr = total_ccnt -
+					per_cpu(previous_ccnt, cpu);
+				per_cpu(previous_ccnt, cpu) = total_ccnt;
+			}
+			for (i = 0; i < NUM_L1_CTRS; i++) {
+				if (cnten_val & (1 << i)) {
+					/* Select */
+					write_sysreg(i, pmselr_el0);
+					isb();
+					/* Read value */
+					total_cnt = read_sysreg(pmxevcntr_el0);
+					delta_l1_cnts[i] = total_cnt -
+					  per_cpu(previous_l1_cnts[i], cpu);
+					per_cpu(previous_l1_cnts[i], cpu) =
+						total_cnt;
+				} else
+					delta_l1_cnts[i] = 0;
+			}
+
+			__entry->ctr0 = delta_l1_cnts[0];
+			__entry->ctr1 = delta_l1_cnts[1];
+			__entry->ctr2 = delta_l1_cnts[2];
+			__entry->ctr3 = delta_l1_cnts[3];
+			__entry->ctr4 = delta_l1_cnts[4];
+			__entry->ctr5 = delta_l1_cnts[5];
+		),
+
+		TP_printk("prev_pid=%d, next_pid=%d, CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u",
+				__entry->old_pid, __entry->new_pid,
+				__entry->cctr,
+				__entry->ctr0, __entry->ctr1,
+				__entry->ctr2, __entry->ctr3,
+				__entry->ctr4, __entry->ctr5)
+);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_counters
+#include <trace/define_trace.h>
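
A small worked sketch of the delta computation in TP_fast_assign above (illustrative only, not part of the patch): because the live reading and the saved per-CPU value are both u32, the subtraction is performed modulo 2^32 and still yields the elapsed count when the hardware counter wraps once between context switches.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t previous = 0xfffffff0u;	/* value saved at the previous switch */
	uint32_t total    = 0x00000010u;	/* current reading after the counter wrapped */
	uint32_t delta    = total - previous;	/* modulo-2^32: 0x20 counts elapsed */

	printf("delta = %u\n", delta);	/* prints 32 */
	return 0;
}
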
diff --git a/arch/arm64/kernel/perf_trace_user.c b/arch/arm64/kernel/perf_trace_user.c
new file mode 100644
index 0000000..5a83cc5
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_user.c
@@ -0,0 +1,96 @@
+/* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/preempt.h>
+#include <linux/stat.h>
+#include <asm/uaccess.h>
+
+#define CREATE_TRACE_POINTS
+#include "perf_trace_user.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#define TRACE_USER_MAX_BUF_SIZE 100
+
+static ssize_t perf_trace_write(struct file *file,
+				const char __user *user_string_in,
+				size_t len, loff_t *ppos)
+{
+	u32 cnten_val;
+	int rc;
+	char buf[TRACE_USER_MAX_BUF_SIZE + 1];
+	ssize_t length;
+
+	if (len == 0)
+		return 0;
+
+	length = len > TRACE_USER_MAX_BUF_SIZE ? TRACE_USER_MAX_BUF_SIZE : len;
+
+	rc = copy_from_user(buf, user_string_in, length);
+	if (rc) {
+		pr_err("%s copy_from_user failed, rc=%d\n", __func__, rc);
+		return -EFAULT;
+	}
+
+	/* Remove any trailing newline and make sure string is terminated */
+	if (buf[length - 1] == '\n')
+		buf[length - 1] = '\0';
+	else
+		buf[length] = '\0';
+
+	/*
+	 * Disable preemption to ensure that all the performance counter
+	 * accesses happen on the same cpu
+	 */
+	preempt_disable();
+	/* stop counters, call the trace function, restart them */
+
+	cnten_val = read_sysreg(pmcntenset_el0);
+	/* Disable all the counters that were enabled */
+	write_sysreg(cnten_val, pmcntenclr_el0);
+
+	trace_perf_trace_user(buf, cnten_val);
+
+	/* Enable all the counters that were disabled */
+	write_sysreg(cnten_val, pmcntenset_el0);
+	preempt_enable();
+
+	return length;
+}
+
+static const struct file_operations perf_trace_fops = {
+	.write = perf_trace_write
+};
+
+static int __init init_perf_trace(void)
+{
+	struct dentry *dir;
+	struct dentry *file;
+	unsigned int value = 1;
+
+	dir = debugfs_create_dir("msm_perf", NULL);
+	if (!dir)
+		return -ENOMEM;
+	file = debugfs_create_file("trace_marker", 0220, dir,
+		&value, &perf_trace_fops);
+	if (!file)
+		return -ENOMEM;
+
+	return 0;
+}
+
+late_initcall(init_perf_trace);
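
A matching usage sketch for the write-only trace_marker node created above (illustrative only, not part of the patch; again assuming the standard debugfs mount): a write briefly disables the currently enabled PMU counters, emits a perf_trace_user event containing the string and the raw counter values, then re-enables the counters.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "checkpoint-1\n";	/* a trailing newline is stripped by the driver */
	int fd = open("/sys/kernel/debug/msm_perf/trace_marker", O_WRONLY);

	if (fd < 0) {
		perror("open msm_perf/trace_marker");
		return 1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}
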
diff --git a/arch/arm64/kernel/perf_trace_user.h b/arch/arm64/kernel/perf_trace_user.h
new file mode 100644
index 0000000..d592392
--- /dev/null
+++ b/arch/arm64/kernel/perf_trace_user.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_PERF_TRACE_USER_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _PERF_TRACE_USER_H_
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM perf_trace_counters
+
+#include <linux/tracepoint.h>
+
+#define CNTENSET_CC    0x80000000
+#define NUM_L1_CTRS             6
+
+TRACE_EVENT(perf_trace_user,
+	TP_PROTO(char *string, u32 cnten_val),
+	TP_ARGS(string, cnten_val),
+
+	TP_STRUCT__entry(
+		__field(u32, cctr)
+		__field(u32, ctr0)
+		__field(u32, ctr1)
+		__field(u32, ctr2)
+		__field(u32, ctr3)
+		__field(u32, ctr4)
+		__field(u32, ctr5)
+		__string(user_string, string)
+		),
+
+	TP_fast_assign(
+		u32 cnt;
+		u32 l1_cnts[NUM_L1_CTRS];
+		int i;
+
+		if (cnten_val & CNTENSET_CC) {
+			/* Read value */
+			cnt = read_sysreg(pmccntr_el0);
+			__entry->cctr = cnt;
+		} else
+			__entry->cctr = 0;
+		for (i = 0; i < NUM_L1_CTRS; i++) {
+			if (cnten_val & (1 << i)) {
+				/* Select */
+				write_sysreg(i, pmselr_el0);
+				isb();
+				/* Read value */
+				cnt = read_sysreg(pmxevcntr_el0);
+				l1_cnts[i] = cnt;
+			} else {
+				l1_cnts[i] = 0;
+			}
+		}
+
+		__entry->ctr0 = l1_cnts[0];
+		__entry->ctr1 = l1_cnts[1];
+		__entry->ctr2 = l1_cnts[2];
+		__entry->ctr3 = l1_cnts[3];
+		__entry->ctr4 = l1_cnts[4];
+		__entry->ctr5 = l1_cnts[5];
+		__assign_str(user_string, string);
+		),
+
+		TP_printk("CCNTR: %u, CTR0: %u, CTR1: %u, CTR2: %u, CTR3: %u, CTR4: %u, CTR5: %u, MSG=%s",
+				__entry->cctr,
+				__entry->ctr0, __entry->ctr1,
+				__entry->ctr2, __entry->ctr3,
+				__entry->ctr4, __entry->ctr5,
+				__get_str(user_string)
+			)
+	);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/arm64/kernel
+#define TRACE_INCLUDE_FILE perf_trace_user
+#include <trace/define_trace.h>
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index 3c494e8..a511ac1 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -69,46 +69,6 @@
 				   0, sizeof(*regs));
 }
 
-static int gpr_set(struct task_struct *target,
-		   const struct user_regset *regset,
-		   unsigned int pos, unsigned int count,
-		   const void *kbuf, const void __user *ubuf)
-{
-	int ret;
-	struct pt_regs *regs = task_pt_regs(target);
-
-	/* Don't copyin TSR or CSR */
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 0, PT_TSR * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-					PT_TSR * sizeof(long),
-					(PT_TSR + 1) * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 (PT_TSR + 1) * sizeof(long),
-				 PT_CSR * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
-					PT_CSR * sizeof(long),
-					(PT_CSR + 1) * sizeof(long));
-	if (ret)
-		return ret;
-
-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				 &regs,
-				 (PT_CSR + 1) * sizeof(long), -1);
-	return ret;
-}
-
 enum c6x_regset {
 	REGSET_GPR,
 };
@@ -120,7 +80,6 @@
 		.size = sizeof(u32),
 		.align = sizeof(u32),
 		.get = gpr_get,
-		.set = gpr_set
 	},
 };
 
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 9207554..0dc1c8f 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -95,7 +95,8 @@
 	long *reg = (long *)&regs;
 
 	/* build user regs in buffer */
-	for (r = 0; r < ARRAY_SIZE(register_offset); r++)
+	BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+	for (r = 0; r < sizeof(regs) / sizeof(long); r++)
 		*reg++ = h8300_get_reg(target, r);
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@
 	long *reg;
 
 	/* build user regs in buffer */
-	for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+	BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+	for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
 		*reg++ = h8300_get_reg(target, r);
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@
 		return ret;
 
 	/* write back to pt_regs */
-	for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
+	for (reg = (long *)&regs, r = 0; r < sizeof(regs) / sizeof(long); r++)
 		h8300_put_reg(target, r, *reg++);
 	return 0;
 }
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 7563628..5e2dc7d 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -24,6 +24,16 @@
  * user_regset definitions.
  */
 
+static unsigned long user_txstatus(const struct pt_regs *regs)
+{
+	unsigned long data = (unsigned long)regs->ctx.Flags;
+
+	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+		data |= USER_GP_REGS_STATUS_CATCH_BIT;
+
+	return data;
+}
+
 int metag_gp_regs_copyout(const struct pt_regs *regs,
 			  unsigned int pos, unsigned int count,
 			  void *kbuf, void __user *ubuf)
@@ -62,9 +72,7 @@
 	if (ret)
 		goto out;
 	/* TXSTATUS */
-	data = (unsigned long)regs->ctx.Flags;
-	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
-		data |= USER_GP_REGS_STATUS_CATCH_BIT;
+	data = user_txstatus(regs);
 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				  &data, 4*25, 4*26);
 	if (ret)
@@ -119,6 +127,7 @@
 	if (ret)
 		goto out;
 	/* TXSTATUS */
+	data = user_txstatus(regs);
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &data, 4*25, 4*26);
 	if (ret)
@@ -244,6 +253,8 @@
 	unsigned long long *ptr;
 	int ret, i;
 
+	if (count < 4*13)
+		return -EINVAL;
 	/* Read the entire pipeline before making any changes */
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				 &rp, 0, 4*13);
@@ -303,7 +314,7 @@
 			const void *kbuf, const void __user *ubuf)
 {
 	int ret;
-	void __user *tls;
+	void __user *tls = target->thread.tls_ptr;
 
 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 	if (ret)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index a92994d..bf83dc1 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -485,7 +485,8 @@
 					  &target->thread.fpu,
 					  0, sizeof(elf_fpregset_t));
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
+	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
 		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					 &fpr_val, i * sizeof(elf_fpreg_t),
 					 (i + 1) * sizeof(elf_fpreg_t));
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 7bd69bd..1d8c24d 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -45,28 +45,9 @@
 
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates.  Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
 
-	flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
-	unsigned long start = (unsigned long)vaddr;
-	void *cursor = vaddr;
-
-	for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
-		struct page *page = vmalloc_to_page(cursor);
-
-		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-			flush_kernel_dcache_page(page);
-	}
-	flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 977f0a4f..53ec75f 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -633,3 +633,25 @@
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	unsigned long start = (unsigned long)vaddr;
+
+	if ((unsigned long)size > parisc_cache_flush_threshold)
+		flush_data_cache();
+	else
+		flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 4063943..e81afc37 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -139,6 +139,8 @@
 
 	printk(KERN_EMERG "System shut down completed.\n"
 	       "Please power this system off now.");
+
+	for (;;);
 }
 
 void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e721..f080abf 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@
   }
 
 #ifdef CONFIG_PPC64_BOOT_WRAPPER
+  . = ALIGN(256);
   .got :
   {
     __toc_start = .;
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 72dac0b..b350ac5 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -439,9 +439,23 @@
 _GLOBAL(pnv_wakeup_tb_loss)
 	ld	r1,PACAR1(r13)
 	/*
-	 * Before entering any idle state, the NVGPRs are saved in the stack
-	 * and they are restored before switching to the process context. Hence
-	 * until they are restored, they are free to be used.
+	 * Before entering any idle state, the NVGPRs are saved in the stack.
+	 * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+	 * NVGPRs are restored. If we are here, it is likely that state is lost,
+	 * but not guaranteed -- the ISA207 and ISA300 tests used to reach
+	 * here are not the same as the test used to restore NVGPRs:
+	 * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
+	 * and SRR1 test for restoring NVGPRs.
+	 *
+	 * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+	 * guarantee they will always be restored. This might be tightened
+	 * with careful reading of specs (particularly for ISA300) but this
+	 * is already a slow wakeup path and it's simpler to be safe.
+	 */
+	li	r0,1
+	stb	r0,PACA_NAPSTATELOST(r13)
+
+	/*
 	 *
 	 * Save SRR1 and LR in NVGPRs as they might be clobbered in
 	 * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index ac082dd..7037ca3 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -313,7 +313,7 @@
 	}
 
 	if (!ret) {
-		unsigned long y;
+		unsigned long y = regs->y;
 
 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 					 &y,
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index c2b8d24..6226cb0e 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -35,6 +35,7 @@
 };
 
 void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
 
 void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
 				 struct kvm_memory_slot *dont);
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index b431539..85024e0 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -156,6 +156,14 @@
 	return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
 }
 
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+	struct kvm_page_track_notifier_head *head;
+
+	head = &kvm->arch.track_notifier_head;
+	cleanup_srcu_struct(&head->track_srcu);
+}
+
 void kvm_page_track_init(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_head *head;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 731044e..e5bc139 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7976,6 +7976,7 @@
 	kvm_free_vcpus(kvm);
 	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
 	kvm_mmu_uninit_vm(kvm);
+	kvm_page_track_cleanup(kvm);
 }
 
 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 81caceb..ee54ad0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -629,17 +629,8 @@
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-		/*
-		 * If a request wasn't started before the queue was
-		 * marked dying, kill it here or it'll go unnoticed.
-		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-		}
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
-	}
 
 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index d19b09c..54fc90e 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -245,7 +245,7 @@
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 	struct ahash_request *req = &ctx->req;
-	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
 	struct sock *sk2;
 	struct alg_sock *ask2;
 	struct hash_ctx *ctx2;
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index bf43b5d..83f1439 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -218,6 +218,7 @@
 	{ .compatible = "img,boston-lcd", .data = &boston_config },
 	{ .compatible = "mti,malta-lcd", .data = &malta_config },
 	{ .compatible = "mti,sead3-lcd", .data = &sead3_config },
+	{ /* sentinel */ }
 };
 
 /**
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 4a99ac7..9959c76 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -55,6 +55,7 @@
 struct amd768_priv {
 	void __iomem *iobase;
 	struct pci_dev *pcidev;
+	u32 pmbase;
 };
 
 static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
@@ -148,33 +149,58 @@
 	if (pmbase == 0)
 		return -EIO;
 
-	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET,
-				PMBASE_SIZE, DRV_NAME)) {
+	if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) {
 		dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n",
 			pmbase + 0xF0);
-		return -EBUSY;
+		err = -EBUSY;
+		goto out;
 	}
 
-	priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET,
-			PMBASE_SIZE);
+	priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
 	if (!priv->iobase) {
 		pr_err(DRV_NAME "Cannot map ioport\n");
-		return -ENOMEM;
+		err = -EINVAL;
+		goto err_iomap;
 	}
 
 	amd_rng.priv = (unsigned long)priv;
+	priv->pmbase = pmbase;
 	priv->pcidev = pdev;
 
 	pr_info(DRV_NAME " detected\n");
-	return devm_hwrng_register(&pdev->dev, &amd_rng);
+	err = hwrng_register(&amd_rng);
+	if (err) {
+		pr_err(DRV_NAME " registering failed (%d)\n", err);
+		goto err_hwrng;
+	}
+	return 0;
+
+err_hwrng:
+	ioport_unmap(priv->iobase);
+err_iomap:
+	release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+out:
+	kfree(priv);
+	return err;
 }
 
 static void __exit mod_exit(void)
 {
+	struct amd768_priv *priv;
+
+	priv = (struct amd768_priv *)amd_rng.priv;
+
+	hwrng_unregister(&amd_rng);
+
+	ioport_unmap(priv->iobase);
+
+	release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+
+	kfree(priv);
 }
 
 module_init(mod_init);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index e7a2459..e1d421a 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -31,6 +31,9 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 
+
+#define PFX	KBUILD_MODNAME ": "
+
 #define GEODE_RNG_DATA_REG   0x50
 #define GEODE_RNG_STATUS_REG 0x54
 
@@ -82,6 +85,7 @@
 
 static int __init mod_init(void)
 {
+	int err = -ENODEV;
 	struct pci_dev *pdev = NULL;
 	const struct pci_device_id *ent;
 	void __iomem *mem;
@@ -89,27 +93,43 @@
 
 	for_each_pci_dev(pdev) {
 		ent = pci_match_id(pci_tbl, pdev);
-		if (ent) {
-			rng_base = pci_resource_start(pdev, 0);
-			if (rng_base == 0)
-				return -ENODEV;
-
-			mem = devm_ioremap(&pdev->dev, rng_base, 0x58);
-			if (!mem)
-				return -ENOMEM;
-			geode_rng.priv = (unsigned long)mem;
-
-			pr_info("AMD Geode RNG detected\n");
-			return devm_hwrng_register(&pdev->dev, &geode_rng);
-		}
+		if (ent)
+			goto found;
 	}
-
 	/* Device not found. */
-	return -ENODEV;
+	goto out;
+
+found:
+	rng_base = pci_resource_start(pdev, 0);
+	if (rng_base == 0)
+		goto out;
+	err = -ENOMEM;
+	mem = ioremap(rng_base, 0x58);
+	if (!mem)
+		goto out;
+	geode_rng.priv = (unsigned long)mem;
+
+	pr_info("AMD Geode RNG detected\n");
+	err = hwrng_register(&geode_rng);
+	if (err) {
+		pr_err(PFX "RNG registering failed (%d)\n",
+		       err);
+		goto err_unmap;
+	}
+out:
+	return err;
+
+err_unmap:
+	iounmap(mem);
+	goto out;
 }
 
 static void __exit mod_exit(void)
 {
+	void __iomem *mem = (void __iomem *)geode_rng.priv;
+
+	hwrng_unregister(&geode_rng);
+	iounmap(mem);
 }
 
 module_init(mod_init);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index d327826..cf874a1 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -204,3 +204,23 @@
 	  SoCs. It accepts requests from other hardware subsystems via RSC.
 	  Say Y to support the clocks managed by RPMh VRM/ARC on platforms
 	  such as sdm845.
+
+config CLOCK_CPU_OSM
+	tristate "OSM CPU Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	 Support for the OSM clock controller.
+	 Operating State Manager (OSM) is a hardware engine used by some
+	 Qualcomm Technologies, Inc. (QTI) SoCs to manage frequency and
+	 voltage scaling in hardware. OSM is capable of controlling
+	 frequency and voltage requests for multiple clusters via the
+	 existence of multiple OSM domains.
+	 Say Y if you want to support OSM clocks.
+
+config MSM_GPUCC_SDM845
+	tristate "SDM845 Graphics Clock Controller"
+	depends on MSM_GCC_SDM845
+	help
+	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
+	  sdm845 devices.
+	  Say Y if you want to support graphics controller devices.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index ebcf4fc..836f0c7 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -16,23 +16,25 @@
 # Keep alphabetically sorted by config
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_CLOCK_CPU_OSM) += clk-cpu-osm.o
+obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
 obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
 obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
 obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
+obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
 obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o
-obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
-obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
+obj-$(CONFIG_MSM_GPUCC_SDM845) += gpucc-sdm845.o
 obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
-obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
-obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
\ No newline at end of file
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
new file mode 100644
index 0000000..d5e2be6
--- /dev/null
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -0,0 +1,2619 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <soc/qcom/scm.h>
+#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+#include "clk-voter.h"
+
+#define OSM_TABLE_SIZE			40
+#define SINGLE_CORE			1
+#define MAX_CLUSTER_CNT			3
+#define MAX_MEM_ACC_VAL_PER_LEVEL	3
+#define MAX_CORE_COUNT			4
+#define CORE_COUNT_VAL(val)		((val & GENMASK(18, 16)) >> 16)
+
+#define OSM_CYCLE_COUNTER_CTRL_REG		0x760
+#define OSM_CYCLE_COUNTER_USE_XO_EDGE_EN	BIT(8)
+
+#define OSM_REG_SIZE			32
+
+#define L3_EFUSE_SHIFT			0
+#define L3_EFUSE_MASK			0
+#define PWRCL_EFUSE_SHIFT		0
+#define PWRCL_EFUSE_MASK		0
+#define PERFCL_EFUSE_SHIFT		29
+#define PERFCL_EFUSE_MASK		0x7
+
+#define ENABLE_REG			0x0
+#define FREQ_REG			0x110
+#define VOLT_REG			0x114
+#define OVERRIDE_REG			0x118
+#define SPM_CC_INC_HYSTERESIS		0x1c
+#define SPM_CC_DEC_HYSTERESIS		0x20
+#define SPM_CORE_INACTIVE_MAPPING	0x28
+#define CC_ZERO_BEHAV_CTRL		0xc
+#define ENABLE_OVERRIDE			BIT(0)
+#define SPM_CC_DCVS_DISABLE		0x24
+#define LLM_FREQ_VOTE_INC_HYSTERESIS	0x30
+#define LLM_FREQ_VOTE_DEC_HYSTERESIS	0x34
+#define LLM_INTF_DCVS_DISABLE		0x40
+#define LLM_VOLTAGE_VOTE_INC_HYSTERESIS	0x38
+#define LLM_VOLTAGE_VOTE_DEC_HYSTERESIS	0x3c
+#define VMIN_REDUCTION_ENABLE_REG	0x48
+#define VMIN_REDUCTION_TIMER_REG	0x4c
+#define PDN_FSM_CTRL_REG		0x54
+#define DELTA_DEX_VAL			BVAL(31, 23, 0xa)
+#define IGNORE_PLL_LOCK			BIT(15)
+#define CC_BOOST_FSM_EN			BIT(0)
+#define CC_BOOST_FSM_TIMERS_REG0	0x58
+#define CC_BOOST_FSM_TIMERS_REG1	0x5c
+#define CC_BOOST_FSM_TIMERS_REG2	0x60
+#define DCVS_BOOST_FSM_EN_MASK		BIT(2)
+#define DCVS_BOOST_FSM_TIMERS_REG0	0x64
+#define DCVS_BOOST_FSM_TIMERS_REG1	0x68
+#define DCVS_BOOST_FSM_TIMERS_REG2	0x6c
+#define PS_BOOST_FSM_EN_MASK		BIT(1)
+#define PS_BOOST_FSM_TIMERS_REG0	0x74
+#define PS_BOOST_FSM_TIMERS_REG1	0x78
+#define PS_BOOST_FSM_TIMERS_REG2	0x7c
+#define BOOST_PROG_SYNC_DELAY_REG	0x80
+#define DCVS_DROOP_FSM_EN_MASK		BIT(5)
+#define DROOP_PROG_SYNC_DELAY_REG	0x9c
+#define DROOP_RELEASE_TIMER_CTRL	0x88
+#define DROOP_CTRL_REG			0x84
+#define DCVS_DROOP_TIMER_CTRL		0x98
+#define PLL_SW_OVERRIDE_ENABLE		0xa0
+#define PLL_SW_OVERRIDE_DROOP_EN	BIT(0)
+#define SPM_CORE_COUNT_CTRL		0x2c
+#define CORE_DCVS_CTRL			0xbc
+#define OVERRIDE_CLUSTER_IDLE_ACK	0x800
+#define REQ_GEN_FSM_STATUS		0x70c
+
+#define PLL_MIN_LVAL			0x21
+#define PLL_MIN_FREQ_REG		0x94
+#define PLL_POST_DIV1			0x1F
+#define PLL_POST_DIV2			0x11F
+#define PLL_MODE			0x0
+#define PLL_L_VAL			0x4
+#define PLL_USER_CTRL			0xc
+#define PLL_CONFIG_CTL_LO		0x10
+#define PLL_CONFIG_CTL_HI		0x14
+#define MIN_VCO_VAL			0x2b
+
+#define MAX_VC				63
+#define MAX_MEM_ACC_LEVELS		3
+#define MAX_MEM_ACC_VAL_PER_LEVEL	3
+#define MAX_MEM_ACC_VALUES		(MAX_MEM_ACC_LEVELS * \
+					MAX_MEM_ACC_VAL_PER_LEVEL)
+#define MEM_ACC_ADDRS			3
+
+#define ISENSE_ON_DATA			0xf
+#define ISENSE_OFF_DATA			0x0
+#define CONSTANT_32			0x20
+
+#define APM_MX_MODE			0x0
+#define APM_APC_MODE			0x2
+#define APM_READ_DATA_MASK		0xc
+#define APM_MX_MODE_VAL			0x4
+#define APM_APC_READ_VAL		0x8
+#define APM_MX_READ_VAL			0x4
+#define APM_CROSSOVER_VC		0xb0
+
+#define MEM_ACC_SEQ_CONST(n)		(n)
+#define MEM_ACC_APM_READ_MASK		0xff
+#define MEMACC_CROSSOVER_VC		0xb8
+
+#define PLL_WAIT_LOCK_TIME_US		10
+#define PLL_WAIT_LOCK_TIME_NS		(PLL_WAIT_LOCK_TIME_US * 1000)
+#define SAFE_FREQ_WAIT_NS		5000
+#define DEXT_DECREMENT_WAIT_NS		1000
+
+#define DATA_MEM(n)			(0x400 + (n) * 4)
+
+#define DCVS_PERF_STATE_DESIRED_REG_0	0x780
+#define DCVS_PERF_STATE_DESIRED_REG(n) (DCVS_PERF_STATE_DESIRED_REG_0 + \
+					(4 * n))
+#define OSM_CYCLE_COUNTER_STATUS_REG_0	0x7d0
+#define OSM_CYCLE_COUNTER_STATUS_REG(n)	(OSM_CYCLE_COUNTER_STATUS_REG_0 + \
+					(4 * n))
+
+static const struct regmap_config osm_qcom_regmap_config = {
+	.reg_bits       = 32,
+	.reg_stride     = 4,
+	.val_bits       = 32,
+	.fast_io	= true,
+};
+
+enum clk_osm_bases {
+	OSM_BASE,
+	PLL_BASE,
+	EFUSE_BASE,
+	SEQ_BASE,
+	NUM_BASES,
+};
+
+enum clk_osm_lut_data {
+	FREQ,
+	FREQ_DATA,
+	PLL_OVERRIDES,
+	MEM_ACC_LEVEL,
+	VIRTUAL_CORNER,
+	NUM_FIELDS,
+};
+
+struct osm_entry {
+	u16 virtual_corner;
+	u16 open_loop_volt;
+	u32 freq_data;
+	u32 override_data;
+	u32 mem_acc_level;
+	long frequency;
+};
+
+struct clk_osm {
+	struct clk_hw hw;
+	struct osm_entry osm_table[OSM_TABLE_SIZE];
+	struct dentry *debugfs;
+	struct regulator *vdd_reg;
+	struct platform_device *vdd_dev;
+	void *vbases[NUM_BASES];
+	unsigned long pbases[NUM_BASES];
+	spinlock_t lock;
+
+	u32 cpu_reg_mask;
+	u32 num_entries;
+	u32 cluster_num;
+	u32 core_num;
+	u32 apm_crossover_vc;
+	u32 apm_threshold_vc;
+	u32 mem_acc_crossover_vc;
+	u32 mem_acc_threshold_vc;
+	u32 min_cpr_vc;
+	u32 cycle_counter_reads;
+	u32 cycle_counter_delay;
+	u32 cycle_counter_factor;
+	u64 total_cycle_counter;
+	u32 prev_cycle_counter;
+	u32 l_val_base;
+	u32 apcs_pll_user_ctl;
+	u32 apcs_pll_min_freq;
+	u32 cfg_gfmux_addr;
+	u32 apcs_cbc_addr;
+	u32 speedbin;
+	u32 mem_acc_crossover_vc_addr;
+	u32 mem_acc_addr[MEM_ACC_ADDRS];
+	u32 ramp_ctl_addr;
+	u32 apm_mode_ctl;
+	u32 apm_status_ctl;
+	u32 osm_clk_rate;
+	u32 xo_clk_rate;
+	bool secure_init;
+	bool red_fsm_en;
+	bool boost_fsm_en;
+	bool safe_fsm_en;
+	bool ps_fsm_en;
+	bool droop_fsm_en;
+
+	struct notifier_block panic_notifier;
+	u32 trace_periodic_timer;
+	bool trace_en;
+	bool wdog_trace_en;
+};
+
+static struct regulator *vdd_l3;
+static struct regulator *vdd_pwrcl;
+static struct regulator *vdd_perfcl;
+
+static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
+{
+	return container_of(_hw, struct clk_osm, hw);
+}
+
+static inline void clk_osm_masked_write_reg(struct clk_osm *c, u32 val,
+					    u32 offset, u32 mask)
+{
+	u32 val2, orig_val;
+
+	val2 = orig_val = readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+	val2 &= ~mask;
+	val2 |= val & mask;
+
+	if (val2 != orig_val)
+		writel_relaxed(val2, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline void clk_osm_write_seq_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[SEQ_BASE] + offset);
+}
+
+static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed_no_log((char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_mb(struct clk_osm *c, int base)
+{
+	return readl_relaxed_no_log((char *)c->vbases[base] + ENABLE_REG);
+}
+
+static long clk_osm_list_rate(struct clk_hw *hw, unsigned int n,
+					unsigned long rate_max)
+{
+	if (n >= hw->init->num_rate_max)
+		return -ENXIO;
+	return hw->init->rate_max[n];
+}
+
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+			unsigned long new)
+{
+	if (IS_ERR_VALUE(new))
+		return false;
+
+	return (req <= new && new < best) || (best < req && best < new);
+}
+
+static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	int i;
+	unsigned long rrate = 0;
+
+	/*
+	 * If the rate passed in is 0, return the first frequency in the
+	 * FMAX table.
+	 */
+	if (!rate)
+		return hw->init->rate_max[0];
+
+	for (i = 0; i < hw->init->num_rate_max; i++) {
+		if (is_better_rate(rate, rrate, hw->init->rate_max[i])) {
+			rrate = hw->init->rate_max[i];
+			if (rate == rrate)
+				break;
+		}
+	}
+
+	pr_debug("%s: rate %lu, rrate %ld, Rate max %ld\n", __func__, rate,
+						rrate, hw->init->rate_max[i]);
+
+	return rrate;
+}
+
+static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+{
+	int quad_core_index, single_core_index = 0;
+	int core_count;
+
+	for (quad_core_index = 0; quad_core_index < entries;
+						quad_core_index++) {
+		core_count = CORE_COUNT_VAL(table[quad_core_index].freq_data);
+		if (rate == table[quad_core_index].frequency &&
+					core_count == SINGLE_CORE) {
+			single_core_index = quad_core_index;
+			continue;
+		}
+		if (rate == table[quad_core_index].frequency &&
+					core_count == MAX_CORE_COUNT)
+			return quad_core_index;
+	}
+	if (single_core_index)
+		return single_core_index;
+
+	return -EINVAL;
+}
+
+static int clk_osm_enable(struct clk_hw *hw)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+
+	clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	/* Wait for 5us for OSM hardware to enable */
+	udelay(5);
+
+	pr_debug("OSM clk enabled for cluster=%d\n", cpuclk->cluster_num);
+
+	return 0;
+}
+
+const struct clk_ops clk_ops_cpu_osm = {
+	.enable = clk_osm_enable,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+};
+
+static struct clk_ops clk_ops_core;
+
+static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+	unsigned long r_rate;
+
+	if (!cpuclk || !parent)
+		return -EINVAL;
+
+	r_rate = clk_osm_round_rate(p_hw, rate, NULL);
+
+	if (rate != r_rate) {
+		pr_err("invalid requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(parent->osm_table,
+				     parent->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+	/*
+	 * Choose index and send request to OSM hardware.
+	 * TODO: Program INACTIVE_OS_REQUEST if needed.
+	 */
+	clk_osm_write_reg(parent, index,
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(parent, OSM_BASE);
+
+	return 0;
+}
+
+static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	int index = 0;
+	unsigned long r_rate;
+
+	if (!cpuclk)
+		return -EINVAL;
+
+	r_rate = clk_osm_round_rate(hw, rate, NULL);
+
+	if (rate != r_rate) {
+		pr_err("invalid requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(cpuclk->osm_table,
+				     cpuclk->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+
+	clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	return 0;
+}
+
+static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+
+	if (!cpuclk || !parent)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(parent,
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+
+	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
+				parent->osm_table[index].frequency);
+
+	/* Convert index to frequency */
+	return parent->osm_table[index].frequency;
+}
+
+static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	int index = 0;
+
+	if (!cpuclk)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(cpuclk, DCVS_PERF_STATE_DESIRED_REG_0);
+
+	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
+				cpuclk->osm_table[index].frequency);
+
+	/* Convert index to frequency */
+	return cpuclk->osm_table[index].frequency;
+}
+
+
+const struct clk_ops clk_ops_l3_osm = {
+	.enable = clk_osm_enable,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+	.recalc_rate = l3_clk_recalc_rate,
+	.set_rate = l3_clk_set_rate,
+};
+
+enum {
+	P_XO,
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+	{ P_XO, 0 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+	"xo",
+};
+
+static struct clk_init_data osm_clks_init[] = {
+	[0] = {
+		.name = "l3_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_l3_osm,
+	},
+	[1] = {
+		.name = "pwrcl_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_cpu_osm,
+	},
+	[2] = {
+		.name = "perfcl_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_cpu_osm,
+	},
+};
+
+static struct clk_osm l3_clk = {
+	.cluster_num = 0,
+	.cpu_reg_mask = 0x0,
+	.hw.init = &osm_clks_init[0],
+};
+
+static DEFINE_CLK_VOTER(l3_cluster0_vote_clk, l3_clk, 0);
+static DEFINE_CLK_VOTER(l3_cluster1_vote_clk, l3_clk, 0);
+
+static struct clk_osm pwrcl_clk = {
+	.cluster_num = 1,
+	.cpu_reg_mask = 0x300,
+	.hw.init = &osm_clks_init[1],
+};
+
+static struct clk_osm cpu0_pwrcl_clk = {
+	.core_num = 0,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu0_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu1_pwrcl_clk = {
+	.core_num = 1,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu1_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu2_pwrcl_clk = {
+	.core_num = 2,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu2_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu3_pwrcl_clk = {
+	.core_num = 3,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu3_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm perfcl_clk = {
+	.cluster_num = 2,
+	.cpu_reg_mask = 0x700,
+	.hw.init = &osm_clks_init[2],
+};
+
+
+static struct clk_osm cpu4_perfcl_clk = {
+	.core_num = 0,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu4_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu5_perfcl_clk = {
+	.core_num = 1,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu5_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu6_perfcl_clk = {
+	.core_num = 2,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu6_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu7_perfcl_clk = {
+	.core_num = 3,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu7_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+/*
+ * Use the cpu* clocks only for writing to the PERF_STATE_DESIRED registers.
+ * Note that we are currently NOT programming the APSS_LMH_GFMUX_CFG &
+ * APSS_OSM_GFMUX_CFG registers.
+ */
+
+static struct clk_hw *osm_qcom_clk_hws[] = {
+	[L3_CLK] = &l3_clk.hw,
+	[L3_CLUSTER0_VOTE_CLK] = &l3_cluster0_vote_clk.hw,
+	[L3_CLUSTER1_VOTE_CLK] = &l3_cluster1_vote_clk.hw,
+	[PWRCL_CLK] = &pwrcl_clk.hw,
+	[CPU0_PWRCL_CLK] = &cpu0_pwrcl_clk.hw,
+	[CPU1_PWRCL_CLK] = &cpu1_pwrcl_clk.hw,
+	[CPU2_PWRCL_CLK] = &cpu2_pwrcl_clk.hw,
+	[CPU3_PWRCL_CLK] = &cpu3_pwrcl_clk.hw,
+	[PERFCL_CLK] = &perfcl_clk.hw,
+	[CPU4_PERFCL_CLK] = &cpu4_perfcl_clk.hw,
+	[CPU5_PERFCL_CLK] = &cpu5_perfcl_clk.hw,
+	[CPU6_PERFCL_CLK] = &cpu6_perfcl_clk.hw,
+	[CPU7_PERFCL_CLK] = &cpu7_perfcl_clk.hw,
+};
+
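+/*
+ * Map a logical CPU number to its per-core clk_osm handle by matching the
+ * CPU's devicetree "reg" value (hardware ID) against the cluster register
+ * masks. Results are cached in cpu_clk_map so the lookup is done only once.
+ */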
+static struct clk_osm *logical_cpu_to_clk(int cpu)
+{
+	struct device_node *cpu_node;
+	const u32 *cell;
+	u64 hwid;
+	static struct clk_osm *cpu_clk_map[NR_CPUS];
+
+	if (cpu_clk_map[cpu])
+		return cpu_clk_map[cpu];
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		goto fail;
+
+	cell = of_get_property(cpu_node, "reg", NULL);
+	if (!cell) {
+		pr_err("%s: missing reg property\n", cpu_node->full_name);
+		goto fail;
+	}
+
+	hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
+	if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask) {
+		switch (cpu) {
+		case 0:
+			cpu_clk_map[cpu] = &cpu0_pwrcl_clk;
+			break;
+		case 1:
+			cpu_clk_map[cpu] = &cpu1_pwrcl_clk;
+			break;
+		case 2:
+			cpu_clk_map[cpu] = &cpu2_pwrcl_clk;
+			break;
+		case 3:
+			cpu_clk_map[cpu] = &cpu3_pwrcl_clk;
+			break;
+		default:
+			pr_err("unsupported CPU number for power cluster\n");
+			return NULL;
+		}
+		return cpu_clk_map[cpu];
+	}
+
+	if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask) {
+		switch (cpu) {
+		case 4:
+			cpu_clk_map[cpu] = &cpu4_perfcl_clk;
+			break;
+		case 5:
+			cpu_clk_map[cpu] = &cpu5_perfcl_clk;
+			break;
+		case 6:
+			cpu_clk_map[cpu] = &cpu6_perfcl_clk;
+			break;
+		case 7:
+			cpu_clk_map[cpu] = &cpu7_perfcl_clk;
+			break;
+		default:
+			pr_err("unsupported CPU number for perf cluster\n");
+			return NULL;
+		}
+		return cpu_clk_map[cpu];
+	}
+
+fail:
+	return NULL;
+}
+
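+/* Convert a duration in nanoseconds to a count of OSM clock cycles. */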
+static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
+{
+	u64 temp;
+
+	temp = (u64)c->osm_clk_rate * nsec;
+	do_div(temp, 1000000000);
+
+	return temp;
+}
+
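+/*
+ * Build the MEM-ACC level to virtual-corner crossover map from the OSM LUT
+ * and program it into the sequencer scratch memory, directly when the
+ * kernel owns the sequencer (qcom,osm-no-tz) or through SCM IO writes
+ * otherwise.
+ */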
+static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
+{
+	int curr_level, i, j = 0;
+	int mem_acc_level_map[MAX_MEM_ACC_LEVELS] = {MAX_VC, MAX_VC, MAX_VC};
+
+	curr_level = c->osm_table[0].mem_acc_level;
+	for (i = 0; i < c->num_entries; i++) {
+		if (curr_level == MAX_MEM_ACC_LEVELS)
+			break;
+
+		if (c->osm_table[i].mem_acc_level != curr_level) {
+			mem_acc_level_map[j++] =
+				c->osm_table[i].virtual_corner;
+			curr_level = c->osm_table[i].mem_acc_level;
+		}
+	}
+
+	if (c->secure_init) {
+		clk_osm_write_seq_reg(c,
+				c->pbases[OSM_BASE] + MEMACC_CROSSOVER_VC,
+				DATA_MEM(57));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[0], DATA_MEM(48));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[1], DATA_MEM(49));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[2], DATA_MEM(50));
+		clk_osm_write_seq_reg(c, c->mem_acc_crossover_vc,
+							DATA_MEM(78));
+		clk_osm_write_seq_reg(c, mem_acc_level_map[0], DATA_MEM(79));
+		if (c == &perfcl_clk)
+			clk_osm_write_seq_reg(c, c->mem_acc_threshold_vc,
+								DATA_MEM(80));
+		else
+			clk_osm_write_seq_reg(c, mem_acc_level_map[1],
+								DATA_MEM(80));
+		/*
+		 * Note that DATA_MEM[81] -> DATA_MEM[89] values will be
+		 * confirmed post-si. Use a value of 1 for DATA_MEM[89] and
+		 * leave the rest of them as 0.
+		 */
+		clk_osm_write_seq_reg(c, 1, DATA_MEM(89));
+	} else {
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(78),
+						c->mem_acc_crossover_vc);
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(79),
+						mem_acc_level_map[0]);
+		if (c == &perfcl_clk)
+			scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+						c->mem_acc_threshold_vc);
+		else
+			scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+						mem_acc_level_map[1]);
+	}
+}
+
+static void clk_osm_program_apm_regs(struct clk_osm *c)
+{
+	if (c == &l3_clk || c == &pwrcl_clk)
+		return;
+
+	/*
+	 * Program address of the control register used to configure
+	 * the Array Power Mux controller
+	 */
+	clk_osm_write_seq_reg(c, c->apm_mode_ctl, DATA_MEM(41));
+
+	/* Program address of controller status register */
+	clk_osm_write_seq_reg(c, c->apm_status_ctl, DATA_MEM(43));
+
+	/* Program address of crossover register */
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + APM_CROSSOVER_VC,
+						DATA_MEM(44));
+
+	/* Program mode value to switch APM to VDD_APC */
+	clk_osm_write_seq_reg(c, APM_APC_MODE, DATA_MEM(72));
+
+	/* Program mode value to switch APM to VDD_MX */
+	clk_osm_write_seq_reg(c, APM_MX_MODE, DATA_MEM(73));
+
+	/* Program mask used to move into read_mask port */
+	clk_osm_write_seq_reg(c, APM_READ_DATA_MASK, DATA_MEM(74));
+
+	/* Value used to move into read_exp port */
+	clk_osm_write_seq_reg(c, APM_APC_READ_VAL, DATA_MEM(75));
+	clk_osm_write_seq_reg(c, APM_MX_READ_VAL, DATA_MEM(76));
+}
+
+static void clk_osm_do_additional_setup(struct clk_osm *c,
+					struct platform_device *pdev)
+{
+	if (!c->secure_init)
+		return;
+
+	dev_info(&pdev->dev, "Performing additional OSM setup due to lack of TZ for cluster=%d\n",
+						 c->cluster_num);
+
+	/* PLL L_VAL & post-div programming */
+	clk_osm_write_seq_reg(c, c->apcs_pll_min_freq, DATA_MEM(32));
+	clk_osm_write_seq_reg(c, c->l_val_base, DATA_MEM(33));
+	clk_osm_write_seq_reg(c, c->apcs_pll_user_ctl, DATA_MEM(34));
+	clk_osm_write_seq_reg(c, PLL_POST_DIV1, DATA_MEM(35));
+	clk_osm_write_seq_reg(c, PLL_POST_DIV2, DATA_MEM(36));
+
+	/* APM Programming */
+	clk_osm_program_apm_regs(c);
+
+	/* GFMUX Programming */
+	clk_osm_write_seq_reg(c, c->cfg_gfmux_addr, DATA_MEM(37));
+	clk_osm_write_seq_reg(c, 0x1, DATA_MEM(65));
+	clk_osm_write_seq_reg(c, 0x2, DATA_MEM(66));
+	clk_osm_write_seq_reg(c, 0x3, DATA_MEM(67));
+	clk_osm_write_seq_reg(c, 0x40000000, DATA_MEM(68));
+	clk_osm_write_seq_reg(c, 0x20000000, DATA_MEM(69));
+	clk_osm_write_seq_reg(c, 0x10000000, DATA_MEM(70));
+	clk_osm_write_seq_reg(c, 0x70000000, DATA_MEM(71));
+
+	/* Override programming */
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] +
+			OVERRIDE_CLUSTER_IDLE_ACK, DATA_MEM(54));
+	clk_osm_write_seq_reg(c, 0x3, DATA_MEM(55));
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + PDN_FSM_CTRL_REG,
+					DATA_MEM(40));
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + REQ_GEN_FSM_STATUS,
+					DATA_MEM(60));
+	clk_osm_write_seq_reg(c, 0x10, DATA_MEM(61));
+	clk_osm_write_seq_reg(c, 0x70, DATA_MEM(62));
+	clk_osm_write_seq_reg(c, c->apcs_cbc_addr, DATA_MEM(112));
+	clk_osm_write_seq_reg(c, 0x2, DATA_MEM(113));
+
+	if (c == &perfcl_clk) {
+		int rc;
+		u32 isense_addr;
+
+		/* Performance cluster isense programming */
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,perfcl-isense-addr", &isense_addr);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,perfcl-isense-addr property, rc=%d\n",
+				rc);
+			return;
+		}
+		clk_osm_write_seq_reg(c, isense_addr, DATA_MEM(45));
+		clk_osm_write_seq_reg(c, ISENSE_ON_DATA, DATA_MEM(46));
+		clk_osm_write_seq_reg(c, ISENSE_OFF_DATA, DATA_MEM(47));
+	}
+
+	clk_osm_write_seq_reg(c, c->ramp_ctl_addr, DATA_MEM(105));
+	clk_osm_write_seq_reg(c, CONSTANT_32, DATA_MEM(92));
+
+	/* Enable/disable CPR ramp settings */
+	clk_osm_write_seq_reg(c, 0x101C031, DATA_MEM(106));
+	clk_osm_write_seq_reg(c, 0x1010031, DATA_MEM(107));
+}
+
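+/*
+ * Enable and program the optional OSM finite state machines (voltage
+ * reduction, CC boost, safe frequency, pulse swallowing and DCVS droop)
+ * selected through devicetree.
+ */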
+static void clk_osm_setup_fsms(struct clk_osm *c)
+{
+	u32 val;
+
+	/* Voltage Reduction FSM */
+	if (c->red_fsm_en) {
+		val = clk_osm_read_reg(c, VMIN_REDUCTION_ENABLE_REG) | BIT(0);
+		val |= BVAL(6, 1, c->min_cpr_vc);
+		clk_osm_write_reg(c, val, VMIN_REDUCTION_ENABLE_REG);
+
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 10000),
+				  VMIN_REDUCTION_TIMER_REG);
+	}
+
+	/* Boost FSM */
+	if (c->boost_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		val |= DELTA_DEX_VAL | CC_BOOST_FSM_EN | IGNORE_PLL_LOCK;
+		clk_osm_write_reg(c, val, PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* Safe Freq FSM */
+	if (c->safe_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_BOOST_FSM_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* Pulse Swallowing FSM */
+	if (c->ps_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | PS_BOOST_FSM_EN_MASK,
+							PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* PLL signal timing control */
+	if (c->boost_fsm_en || c->safe_fsm_en || c->ps_fsm_en)
+		clk_osm_write_reg(c, 0x2, BOOST_PROG_SYNC_DELAY_REG);
+
+	/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
+	if (c->droop_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_DROOP_FSM_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+	}
+
+	if (c->ps_fsm_en || c->droop_fsm_en) {
+		clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 100),
+				  DROOP_RELEASE_TIMER_CTRL);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 150),
+				  DCVS_DROOP_TIMER_CTRL);
+		/*
+		 * TODO: Check if DCVS_DROOP_CODE used is correct. Also check
+		 * if RESYNC_CTRL should be set for L3.
+		 */
+		val = BIT(31) | BVAL(22, 16, 0x2) | BVAL(6, 0, 0x8);
+		clk_osm_write_reg(c, val, DROOP_CTRL_REG);
+	}
+}
+
+static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Set up the timer that controls how long OSM should wait before
+	 * performing DCVS when an LLM up-voltage request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-up-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM voltage up timer value, rc=%d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+	}
+
+	/*
+	 * Set up the timer that controls how long OSM should wait before
+	 * performing DCVS when an LLM down-voltage request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Voltage down timer value: %d\n",
+									rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM Voltage requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-volt-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Voltage requests\n");
+		val = 0;
+	} else {
+		val = 1;
+	}
+
+	/* Enable or disable LLM VOLT DCVS */
+	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static int clk_osm_set_llm_freq_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Set up the timer that controls how long OSM should wait before
+	 * performing DCVS when an LLM up-frequency request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "Unable to get CC up timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+						LLM_FREQ_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+						LLM_FREQ_VOTE_INC_HYSTERESIS);
+	}
+
+	/*
+	 * Set up the timer that controls how long OSM should wait before
+	 * performing DCVS when an LLM down-frequency request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Frequency down timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM frequency requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-freq-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Frequency requests\n");
+		val = 0;
+	} else {
+		val = BIT(1);
+	}
+
+	/* Enable or disable LLM FREQ DCVS */
+	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the write to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
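+/*
+ * Configure the core-count (CC) DCVS policy: up/down hysteresis timers, the
+ * index override used during cluster power collapse, and whether cores in
+ * C2/C3 are treated as active.
+ */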
+static int clk_osm_set_cc_policy(struct platform_device *pdev)
+{
+	int rc = 0, val;
+	u32 *array;
+	struct device_node *of = pdev->dev.of_node;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, "qcom,up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No up timer value, rc=%d\n",
+			 rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk,
+					array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_INC_HYSTERESIS);
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No down timer value, rc=%d\n", rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk,
+				       array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+				       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+				       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+	}
+
+	/* OSM index override for cluster PC */
+	rc = of_property_read_u32_array(of, "qcom,pc-override-index",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No PC override index value, rc=%d\n",
+			rc);
+		clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+		clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+	} else {
+		val = BVAL(6, 1, array[pwrcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL);
+		val = BVAL(6, 1, array[perfcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL);
+	}
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c3-active");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in C3 as active\n");
+
+		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+	}
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c2-active");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in C2 as active\n");
+
+		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+	}
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,disable-cc-dvcs");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Disabling CC based DCVS\n");
+		val = 1;
+	} else {
+		val = 0;
+	}
+
+	clk_osm_write_reg(&l3_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static void clk_osm_setup_cluster_pll(struct clk_osm *c)
+{
+	writel_relaxed(0x0, c->vbases[PLL_BASE] + PLL_MODE);
+	writel_relaxed(0x26, c->vbases[PLL_BASE] + PLL_L_VAL);
+	writel_relaxed(0x8, c->vbases[PLL_BASE] +
+			PLL_USER_CTRL);
+	writel_relaxed(0x20000AA8, c->vbases[PLL_BASE] +
+			PLL_CONFIG_CTL_LO);
+	writel_relaxed(0x000003D2, c->vbases[PLL_BASE] +
+			PLL_CONFIG_CTL_HI);
+	writel_relaxed(0x2, c->vbases[PLL_BASE] +
+			PLL_MODE);
+
+	/* Ensure writes complete before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	udelay(PLL_WAIT_LOCK_TIME_US);
+
+	writel_relaxed(0x6, c->vbases[PLL_BASE] + PLL_MODE);
+
+	/* Ensure write completes before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	usleep_range(50, 75);
+
+	writel_relaxed(0x7, c->vbases[PLL_BASE] + PLL_MODE);
+}
+
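+/*
+ * Miscellaneous sequencer programming: core-count control, PLL minimum
+ * frequency, the PLL-lock ignore patterns and the L_VAL of the first
+ * turbo virtual corner.
+ */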
+static void clk_osm_misc_programming(struct clk_osm *c)
+{
+	u32 lval = 0xFF, val;
+	int i;
+
+	clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CORE_COUNT_CTRL);
+	clk_osm_write_reg(c, PLL_MIN_LVAL, PLL_MIN_FREQ_REG);
+
+	/* Pattern to set/clear PLL lock in PDN_FSM_CTRL_REG */
+	val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+	if (c->secure_init) {
+		val |= IGNORE_PLL_LOCK;
+		clk_osm_write_seq_reg(c, val, DATA_MEM(108));
+		val &= ~IGNORE_PLL_LOCK;
+		clk_osm_write_seq_reg(c, val, DATA_MEM(109));
+		clk_osm_write_seq_reg(c, MIN_VCO_VAL, DATA_MEM(110));
+	} else {
+		val |= IGNORE_PLL_LOCK;
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(108), val);
+		val &= ~IGNORE_PLL_LOCK;
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(109), val);
+	}
+
+	/* Program LVAL corresponding to first turbo VC */
+	for (i = 0; i < c->num_entries; i++) {
+		if (c->osm_table[i].mem_acc_level == MAX_MEM_ACC_LEVELS) {
+			lval = c->osm_table[i].freq_data & GENMASK(7, 0);
+			break;
+		}
+	}
+
+	if (c->secure_init)
+		clk_osm_write_seq_reg(c, lval, DATA_MEM(114));
+	else
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(114), lval);
+}
+
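+/*
+ * Write the software LUT into the OSM hardware table: one frequency,
+ * voltage and override register per entry. Unused rows are padded with the
+ * last valid entry's values.
+ */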
+static int clk_osm_setup_hw_table(struct clk_osm *c)
+{
+	struct osm_entry *entry = c->osm_table;
+	int i;
+	u32 freq_val = 0, volt_val = 0, override_val = 0;
+	u32 table_entry_offset, last_mem_acc_level = 0, last_virtual_corner = 0;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		if (i < c->num_entries) {
+			freq_val = entry[i].freq_data;
+			volt_val = BVAL(27, 24, entry[i].mem_acc_level)
+				| BVAL(21, 16, entry[i].virtual_corner)
+				| BVAL(11, 0, entry[i].open_loop_volt);
+			override_val = entry[i].override_data;
+
+			if (last_virtual_corner && last_virtual_corner ==
+			    entry[i].virtual_corner && last_mem_acc_level !=
+			    entry[i].mem_acc_level) {
+				pr_err("invalid LUT entry at row=%d virtual_corner=%d, mem_acc_level=%d\n",
+				       i, entry[i].virtual_corner,
+				       entry[i].mem_acc_level);
+				return -EINVAL;
+			}
+			last_virtual_corner = entry[i].virtual_corner;
+			last_mem_acc_level = entry[i].mem_acc_level;
+		}
+
+		table_entry_offset = i * OSM_REG_SIZE;
+		clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset);
+		clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset);
+		clk_osm_write_reg(c, override_val, OVERRIDE_REG +
+				  table_entry_offset);
+	}
+
+	/* Make sure all writes go through */
+	clk_osm_mb(c, OSM_BASE);
+
+	return 0;
+}
+
+static void clk_osm_print_osm_table(struct clk_osm *c)
+{
+	int i;
+	struct osm_entry *table = c->osm_table;
+	u32 pll_src, pll_div, lval, core_count;
+
+	pr_debug("Index, Frequency, VC, OLV (mv), Core Count, PLL Src, PLL Div, L-Val, ACC Level\n");
+	for (i = 0; i < c->num_entries; i++) {
+		pll_src = (table[i].freq_data & GENMASK(31, 30)) >> 30;
+		pll_div = (table[i].freq_data & GENMASK(29, 28)) >> 28;
+		lval = table[i].freq_data & GENMASK(7, 0);
+		core_count = (table[i].freq_data & GENMASK(18, 16)) >> 16;
+
+		pr_debug("%3d, %11lu, %2u, %5u, %2u, %6u, %8u, %7u, %5u\n",
+			i,
+			table[i].frequency,
+			table[i].virtual_corner,
+			table[i].open_loop_volt,
+			core_count,
+			pll_src,
+			pll_div,
+			lval,
+			table[i].mem_acc_level);
+	}
+	pr_debug("APM threshold corner=%d, crossover corner=%d\n",
+			c->apm_threshold_vc, c->apm_crossover_vc);
+	pr_debug("MEM-ACC threshold corner=%d, crossover corner=%d\n",
+			c->mem_acc_threshold_vc, c->mem_acc_crossover_vc);
+}
+
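+/* Return the open-loop voltage in uV for a LUT frequency, or -EINVAL. */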
+static int find_voltage(struct clk_osm *c, unsigned long rate)
+{
+	struct osm_entry *table = c->osm_table;
+	int entries = c->num_entries, i;
+
+	for (i = 0; i < entries; i++) {
+		if (rate == table[i].frequency) {
+			/* OPP table voltages have units of mV */
+			return table[i].open_loop_volt * 1000;
+		}
+	}
+
+	return -EINVAL;
+}
+
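+/*
+ * Register an OPP (frequency/voltage pair) with the given device for every
+ * rate in the cluster's rate_max table.
+ */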
+static int add_opp(struct clk_osm *c, struct device *dev)
+{
+	unsigned long rate = 0;
+	int uv;
+	long rc;
+	int j = 0;
+	unsigned long min_rate = c->hw.init->rate_max[0];
+	unsigned long max_rate =
+			c->hw.init->rate_max[c->hw.init->num_rate_max - 1];
+
+	while (1) {
+		rate = c->hw.init->rate_max[j++];
+		uv = find_voltage(c, rate);
+		if (uv <= 0) {
+			pr_warn("No voltage for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		rc = dev_pm_opp_add(dev, rate, uv);
+		if (rc) {
+			pr_warn("failed to add OPP for %lu\n", rate);
+			return rc;
+		}
+
+		/*
+		 * Print the OPP pair for the lowest and highest frequency for
+		 * each device that we're populating. This is important since
+		 * this information will be used by thermal mitigation and the
+		 * scheduler.
+		 */
+		if (rate == min_rate)
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+
+		if (rate == max_rate && max_rate != min_rate) {
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+			break;
+		}
+
+		if (min_rate == max_rate)
+			break;
+	}
+	return 0;
+}
+
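+/* Attach the parent cluster's OPP table to every possible CPU device. */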
+static void populate_opp_table(struct platform_device *pdev)
+{
+	int cpu;
+	struct device *cpu_dev;
+	struct clk_osm *c, *parent;
+	struct clk_hw *hw_parent;
+
+	for_each_possible_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			return;
+		}
+
+		hw_parent = clk_hw_get_parent(&c->hw);
+		parent = to_clk_osm(hw_parent);
+		cpu_dev = get_cpu_device(cpu);
+		if (cpu_dev)
+			if (add_opp(parent, cpu_dev))
+				pr_err("Failed to add OPP levels for %s\n",
+					dev_name(cpu_dev));
+	}
+
+	/*TODO: Figure out which device to tag the L3 table to */
+}
+
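+/*
+ * Return a monotonically increasing 64-bit cycle count for the CPU by
+ * accumulating the 32-bit OSM cycle counter and handling its wrap-around.
+ */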
+static u64 clk_osm_get_cpu_cycle_counter(int cpu)
+{
+	u32 val;
+	unsigned long flags;
+	struct clk_osm *parent, *c = logical_cpu_to_clk(cpu);
+
+	if (IS_ERR_OR_NULL(c)) {
+		pr_err("no clock device for CPU=%d\n", cpu);
+		return 0;
+	}
+
+	parent = to_clk_osm(clk_hw_get_parent(&c->hw));
+
+	spin_lock_irqsave(&parent->lock, flags);
+	val = clk_osm_read_reg_no_log(parent,
+			OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
+
+	if (val < c->prev_cycle_counter) {
+		/* Handle counter overflow */
+		c->total_cycle_counter += UINT_MAX -
+			c->prev_cycle_counter + val;
+		c->prev_cycle_counter = val;
+	} else {
+		c->total_cycle_counter += val - c->prev_cycle_counter;
+		c->prev_cycle_counter = val;
+	}
+	spin_unlock_irqrestore(&parent->lock, flags);
+
+	return c->total_cycle_counter;
+}
+
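+/* Enable the cluster cycle counter and set the OSM-to-XO clock ratio. */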
+static void clk_osm_setup_cycle_counters(struct clk_osm *c)
+{
+	u32 ratio = c->osm_clk_rate;
+	u32 val = 0;
+
+	/* Enable cycle counter */
+	val = BIT(0);
+	/* Setup OSM clock to XO ratio */
+	ratio /= c->xo_clk_rate;
+	val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN;
+
+	clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG);
+	pr_debug("OSM to XO clock ratio: %d\n", ratio);
+}
+
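+/*
+ * Determine the virtual corners at which the APM and MEM-ACC thresholds are
+ * crossed for the performance cluster, based on the regulator corner
+ * voltages and the devicetree threshold-voltage properties.
+ */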
+static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
+					struct platform_device *pdev)
+{
+	struct regulator *regulator = c->vdd_reg;
+	int count, vc, i, memacc_threshold, apm_threshold;
+	int rc = 0;
+	u32 corner_volt;
+
+	if (c == &l3_clk || c == &pwrcl_clk)
+		return rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,perfcl-apcs-apm-threshold-voltage",
+				  &apm_threshold);
+	if (rc) {
+		pr_err("qcom,perfcl-apcs-apm-threshold-voltage property not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,perfcl-apcs-mem-acc-threshold-voltage",
+				  &memacc_threshold);
+	if (rc) {
+		pr_err("qcom,perfcl-apcs-mem-acc-threshold-voltage property not specified\n");
+		return rc;
+	}
+
+	/*
+	 * Initialize VC settings in case none of them go above the voltage
+	 * limits
+	 */
+	c->apm_threshold_vc = c->apm_crossover_vc = c->mem_acc_crossover_vc =
+				c->mem_acc_threshold_vc = MAX_VC;
+
+	count = regulator_count_voltages(regulator);
+	if (count < 0) {
+		pr_err("Failed to get the number of virtual corners supported\n");
+		return count;
+	}
+
+	c->apm_crossover_vc = count - 2;
+	c->mem_acc_crossover_vc = count - 1;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		corner_volt = regulator_list_corner_voltage(regulator, vc);
+
+		if (c->apm_threshold_vc == MAX_VC &&
+				corner_volt >= apm_threshold)
+			c->apm_threshold_vc = c->osm_table[i].virtual_corner;
+
+		if (c->mem_acc_threshold_vc == MAX_VC &&
+				corner_volt >= memacc_threshold)
+			c->mem_acc_threshold_vc =
+				c->osm_table[i].virtual_corner;
+	}
+
+	return rc;
+}
+
+static int clk_osm_resolve_open_loop_voltages(struct clk_osm *c)
+{
+	struct regulator *regulator = c->vdd_reg;
+	u32 vc, mv;
+	int i;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		/* Voltage is in uv. Convert to mv */
+		mv = regulator_list_corner_voltage(regulator, vc) / 1000;
+		c->osm_table[i].open_loop_volt = mv;
+	}
+
+	return 0;
+}
+
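+/*
+ * Parse the per-cluster OSM LUT from devicetree. Each row carries the
+ * frequency, frequency data word, virtual corner, PLL override data and
+ * MEM-ACC level; the frequencies reachable at the maximum core count form
+ * the cluster's rate_max table.
+ */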
+static int clk_osm_get_lut(struct platform_device *pdev,
+			   struct clk_osm *c, char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, total_elems, num_rows, i, j, k;
+	int rc = 0;
+	u32 *array;
+	u32 *fmax_temp;
+	u32 data;
+	unsigned long abs_fmax = 0;
+	bool last_entry = false;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	total_elems = prop_len / sizeof(u32);
+	if (total_elems % NUM_FIELDS) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	num_rows = total_elems / NUM_FIELDS;
+
+	fmax_temp = devm_kzalloc(&pdev->dev, num_rows * sizeof(*fmax_temp),
+					GFP_KERNEL);
+	if (!fmax_temp)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev, prop_len, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, prop_name, array, total_elems);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse OSM table, rc=%d\n", rc);
+		goto exit;
+	}
+
+	pr_debug("%s: Entries in Table: %d\n", __func__, num_rows);
+	c->num_entries = num_rows;
+	if (c->num_entries > OSM_TABLE_SIZE) {
+		pr_err("LUT entries %d exceed maximum size %d\n",
+		       c->num_entries, OSM_TABLE_SIZE);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	for (i = 0, j = 0, k = 0; j < OSM_TABLE_SIZE; j++) {
+		c->osm_table[j].frequency = array[i + FREQ];
+		c->osm_table[j].freq_data = array[i + FREQ_DATA];
+		c->osm_table[j].override_data = array[i + PLL_OVERRIDES];
+		c->osm_table[j].mem_acc_level = array[i + MEM_ACC_LEVEL];
+		/* Voltage corners are 0 based in the OSM LUT */
+		c->osm_table[j].virtual_corner = array[i + VIRTUAL_CORNER] - 1;
+		pr_debug("index=%d freq=%ld virtual_corner=%d freq_data=0x%x override_data=0x%x mem_acc_level=0x%x\n",
+			 j, c->osm_table[j].frequency,
+			 c->osm_table[j].virtual_corner,
+			 c->osm_table[j].freq_data,
+			 c->osm_table[j].override_data,
+			 c->osm_table[j].mem_acc_level);
+
+		data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
+		if (!last_entry && data == MAX_CORE_COUNT) {
+			fmax_temp[k] = array[i];
+			k++;
+		}
+
+		if (i < total_elems - NUM_FIELDS)
+			i += NUM_FIELDS;
+		else {
+			abs_fmax = array[i];
+			last_entry = true;
+		}
+	}
+	fmax_temp[k] = abs_fmax;
+
+	osm_clks_init[c->cluster_num].rate_max = devm_kzalloc(&pdev->dev,
+						 k * sizeof(unsigned long),
+						       GFP_KERNEL);
+	if (!osm_clks_init[c->cluster_num].rate_max) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	for (i = 0; i < k; i++)
+		osm_clks_init[c->cluster_num].rate_max[i] = fmax_temp[i];
+
+	osm_clks_init[c->cluster_num].num_rate_max = k;
+exit:
+	devm_kfree(&pdev->dev, fmax_temp);
+	devm_kfree(&pdev->dev, array);
+	return rc;
+}
+
+static int clk_osm_parse_dt_configs(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0;
+	struct resource *res;
+	char l3_min_cpr_vc_str[] = "qcom,l3-min-cpr-vc-bin0";
+	char pwrcl_min_cpr_vc_str[] = "qcom,pwrcl-min-cpr-vc-bin0";
+	char perfcl_min_cpr_vc_str[] = "qcom,perfcl-min-cpr-vc-bin0";
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, "qcom,l-val-base",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,l-val-base property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.l_val_base = array[l3_clk.cluster_num];
+	pwrcl_clk.l_val_base = array[pwrcl_clk.cluster_num];
+	perfcl_clk.l_val_base = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-pll-user-ctl",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-user-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_pll_user_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_pll_user_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_pll_user_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-pll-min-freq",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-min-freq property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_pll_min_freq = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_pll_min_freq = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_pll_min_freq = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-mode-ctl",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-mode-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apm_mode_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apm_mode_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_mode_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-status-ctrl",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-status-ctrl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apm_status_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apm_status_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_status_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,cfg-gfmux-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,cfg-gfmux-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.cfg_gfmux_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.cfg_gfmux_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.cfg_gfmux_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-cbc-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-cbc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_cbc_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_cbc_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_cbc_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-ramp-ctl-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-ramp-ctl-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.ramp_ctl_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.ramp_ctl_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.ramp_ctl_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32(of, "qcom,xo-clk-rate",
+				  &pwrcl_clk.xo_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,xo-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.xo_clk_rate = perfcl_clk.xo_clk_rate = pwrcl_clk.xo_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,osm-clk-rate",
+				  &pwrcl_clk.osm_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,osm-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	l3_clk.osm_clk_rate = perfcl_clk.osm_clk_rate = pwrcl_clk.osm_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,cc-reads",
+				  &pwrcl_clk.cycle_counter_reads);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,cc-reads property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	l3_clk.cycle_counter_reads = perfcl_clk.cycle_counter_reads =
+			pwrcl_clk.cycle_counter_reads;
+
+	rc = of_property_read_u32(of, "qcom,cc-delay",
+				  &pwrcl_clk.cycle_counter_delay);
+	if (rc)
+		dev_dbg(&pdev->dev, "no delays between cycle counter reads\n");
+	else
+		l3_clk.cycle_counter_delay = perfcl_clk.cycle_counter_delay =
+			pwrcl_clk.cycle_counter_delay;
+
+	rc = of_property_read_u32(of, "qcom,cc-factor",
+				  &pwrcl_clk.cycle_counter_factor);
+	if (rc)
+		dev_dbg(&pdev->dev, "no factor specified for cycle counter estimation\n");
+	else
+		l3_clk.cycle_counter_factor = perfcl_clk.cycle_counter_factor =
+			pwrcl_clk.cycle_counter_factor;
+
+	l3_clk.red_fsm_en = perfcl_clk.red_fsm_en = pwrcl_clk.red_fsm_en =
+		of_property_read_bool(of, "qcom,red-fsm-en");
+
+	l3_clk.boost_fsm_en = perfcl_clk.boost_fsm_en =
+		pwrcl_clk.boost_fsm_en =
+		of_property_read_bool(of, "qcom,boost-fsm-en");
+
+	l3_clk.safe_fsm_en = perfcl_clk.safe_fsm_en = pwrcl_clk.safe_fsm_en =
+		of_property_read_bool(of, "qcom,safe-fsm-en");
+
+	l3_clk.ps_fsm_en = perfcl_clk.ps_fsm_en = pwrcl_clk.ps_fsm_en =
+		of_property_read_bool(of, "qcom,ps-fsm-en");
+
+	l3_clk.droop_fsm_en = perfcl_clk.droop_fsm_en =
+		pwrcl_clk.droop_fsm_en =
+		of_property_read_bool(of, "qcom,droop-fsm-en");
+
+	devm_kfree(&pdev->dev, array);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"l3_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for l3_sequencer\n");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	l3_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!l3_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in l3_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"pwrcl_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for pwrcl_sequencer\n");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!pwrcl_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in pwrcl_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"perfcl_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for perfcl_sequencer\n");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	perfcl_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!perfcl_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in perfcl_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	snprintf(l3_min_cpr_vc_str, ARRAY_SIZE(l3_min_cpr_vc_str),
+			"qcom,l3-min-cpr-vc-bin%d", l3_clk.speedbin);
+	rc = of_property_read_u32(of, l3_min_cpr_vc_str, &l3_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			l3_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	snprintf(pwrcl_min_cpr_vc_str, ARRAY_SIZE(pwrcl_min_cpr_vc_str),
+			"qcom,pwrcl-min-cpr-vc-bin%d", pwrcl_clk.speedbin);
+	rc = of_property_read_u32(of, pwrcl_min_cpr_vc_str,
+						&pwrcl_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			pwrcl_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	snprintf(perfcl_min_cpr_vc_str, ARRAY_SIZE(perfcl_min_cpr_vc_str),
+			"qcom,perfcl-min-cpr-vc-bin%d", perfcl_clk.speedbin);
+	rc = of_property_read_u32(of, perfcl_min_cpr_vc_str,
+						&perfcl_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			perfcl_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	l3_clk.secure_init = perfcl_clk.secure_init = pwrcl_clk.secure_init =
+		of_property_read_bool(pdev->dev.of_node, "qcom,osm-no-tz");
+
+	if (!pwrcl_clk.secure_init)
+		return rc;
+
+	rc = of_property_read_u32_array(of, "qcom,l3-mem-acc-addr",
+					l3_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,l3-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,pwrcl-mem-acc-addr",
+					pwrcl_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,pwrcl-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,perfcl-mem-acc-addr",
+					perfcl_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,perfcl-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int clk_osm_resources_init(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct resource *res;
+	unsigned long pbase;
+	int rc = 0;
+	void *vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_l3_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_l3_base");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	l3_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!l3_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_l3_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_pwrcl_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_pwrcl_base");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+	if (!pwrcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_pwrcl_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_perfcl_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_perfcl_base");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	perfcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!perfcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_perfcl_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l3_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for l3_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map l3_pll base\n");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[PLL_BASE] = pbase;
+	l3_clk.vbases[PLL_BASE] = vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrcl_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for pwrcl_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map pwrcl_pll base\n");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[PLL_BASE] = pbase;
+	pwrcl_clk.vbases[PLL_BASE] = vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "perfcl_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for perfcl_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map perfcl_pll base\n");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[PLL_BASE] = pbase;
+	perfcl_clk.vbases[PLL_BASE] = vbase;
+
+	/* efuse speed bin fuses are optional */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "pwrcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n");
+			return -ENOMEM;
+		}
+		pwrcl_clk.pbases[EFUSE_BASE] = pbase;
+		pwrcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "perfcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n");
+			return -ENOMEM;
+		}
+		perfcl_clk.pbases[EFUSE_BASE] = pbase;
+		perfcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	vdd_l3 = devm_regulator_get(&pdev->dev, "vdd-l3");
+	if (IS_ERR(vdd_l3)) {
+		rc = PTR_ERR(vdd_l3);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the l3 vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	l3_clk.vdd_reg = vdd_l3;
+
+	vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
+	if (IS_ERR(vdd_pwrcl)) {
+		rc = PTR_ERR(vdd_pwrcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the pwrcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	pwrcl_clk.vdd_reg = vdd_pwrcl;
+
+	vdd_perfcl = devm_regulator_get(&pdev->dev, "vdd-perfcl");
+	if (IS_ERR(vdd_perfcl)) {
+		rc = PTR_ERR(vdd_perfcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the perfcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	perfcl_clk.vdd_reg = vdd_perfcl;
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-l3-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-l3-supply\n");
+		return -EINVAL;
+	}
+
+	l3_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!l3_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-l3-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-pwrcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-pwrcl-supply\n");
+		return -EINVAL;
+	}
+
+	pwrcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!pwrcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-pwrcl-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-perfcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	perfcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!perfcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
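+/* Initial rate requested for the L3 and CPU clocks during probe. */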
+static unsigned long init_rate = 300000000;
+
+static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
+{
+	int rc = 0, cpu, i;
+	int speedbin = 0, pvs_ver = 0;
+	u32 pte_efuse, val;
+	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
+	struct clk *ext_xo_clk, *clk;
+	struct clk_osm *c;
+	struct device *dev = &pdev->dev;
+	struct clk_onecell_data *clk_data;
+	struct resource *res;
+	void *vbase;
+	char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
+	char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
+	char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
+	struct cpu_cycle_counter_cb cb = {
+		.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
+	};
+
+	/*
+	 * Require the RPM-XO clock to be registered before OSM.
+	 * The cpuss_gpll0_clk_src is expected to be configured by the
+	 * bootloader.
+	 */
+	ext_xo_clk = devm_clk_get(dev, "xo_ao");
+	if (IS_ERR(ext_xo_clk)) {
+		if (PTR_ERR(ext_xo_clk) != -EPROBE_DEFER)
+			dev_err(dev, "Unable to get xo clock\n");
+		return PTR_ERR(ext_xo_clk);
+	}
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+								GFP_KERNEL);
+	if (!clk_data) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+					sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks) {
+		rc = -ENOMEM;
+		goto clk_err;
+	}
+
+	clk_data->clk_num = num_clks;
+
+	rc = clk_osm_parse_dt_configs(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse device tree configurations\n");
+		return rc;
+	}
+
+	rc = clk_osm_resources_init(pdev);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "resources init failed, rc=%d\n",
+									rc);
+		return rc;
+	}
+
+	if (l3_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
+		l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
+						    L3_EFUSE_MASK);
+		snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
+			 "qcom,l3-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (pwrcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+		pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+						    PWRCL_EFUSE_MASK);
+		snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+			 "qcom,pwrcl-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (perfcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+		perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
+							PERFCL_EFUSE_MASK);
+		snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
+			 "qcom,perfcl-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev,
+			"No APM crossover corner programmed for L3\n");
+
+	rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev,
+			"No APM crossover corner programmed for pwrcl_clk\n");
+
+	rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+
+	clk_osm_setup_cycle_counters(&l3_clk);
+	clk_osm_setup_cycle_counters(&pwrcl_clk);
+	clk_osm_setup_cycle_counters(&perfcl_clk);
+
+	clk_osm_print_osm_table(&l3_clk);
+	clk_osm_print_osm_table(&pwrcl_clk);
+	clk_osm_print_osm_table(&perfcl_clk);
+
+	rc = clk_osm_setup_hw_table(&l3_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&pwrcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&perfcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+		goto exit;
+	}
+
+	/* Policy tuning */
+	rc = clk_osm_set_cc_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "cc policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Freq Policy Tuning */
+	rc = clk_osm_set_llm_freq_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Voltage Policy Tuning */
+	rc = clk_osm_set_llm_volt_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
+		goto exit;
+	}
+
+	clk_osm_setup_fsms(&l3_clk);
+	clk_osm_setup_fsms(&pwrcl_clk);
+	clk_osm_setup_fsms(&perfcl_clk);
+
+	/* Program VC at which the array power supply needs to be switched */
+	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
+					APM_CROSSOVER_VC);
+	if (perfcl_clk.secure_init) {
+		clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
+				DATA_MEM(77));
+		clk_osm_write_seq_reg(&perfcl_clk,
+				(0x39 | (perfcl_clk.apm_threshold_vc << 6)),
+				DATA_MEM(111));
+	} else {
+		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
+				perfcl_clk.apm_crossover_vc);
+		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(111),
+				(0x39 | (perfcl_clk.apm_threshold_vc << 6)));
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"apps_itm_ctl");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for apps_itm_ctl\n");
+		return -ENOMEM;
+	}
+
+	vbase = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!vbase) {
+		dev_err(&pdev->dev,
+				"Unable to map in apps_itm_ctl base\n");
+		return -ENOMEM;
+	}
+
+	val = readl_relaxed(vbase + 0x0);
+	val &= ~BIT(0);
+	writel_relaxed(val, vbase + 0x0);
+
+	val = readl_relaxed(vbase + 0x4);
+	val &= ~BIT(0);
+	writel_relaxed(val, vbase + 0x4);
+
+	/*
+	 * Perform typical secure-world HW initialization
+	 * as necessary.
+	 */
+	clk_osm_do_additional_setup(&l3_clk, pdev);
+	clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+	clk_osm_do_additional_setup(&perfcl_clk, pdev);
+
+	/* MEM-ACC Programming */
+	clk_osm_program_mem_acc_regs(&l3_clk);
+	clk_osm_program_mem_acc_regs(&pwrcl_clk);
+	clk_osm_program_mem_acc_regs(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,osm-pll-setup")) {
+		clk_osm_setup_cluster_pll(&l3_clk);
+		clk_osm_setup_cluster_pll(&pwrcl_clk);
+		clk_osm_setup_cluster_pll(&perfcl_clk);
+	}
+
+	/* Misc programming */
+	clk_osm_misc_programming(&l3_clk);
+	clk_osm_misc_programming(&pwrcl_clk);
+	clk_osm_misc_programming(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,enable-per-core-dcvs")) {
+		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
+		val |= BIT(0);
+		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
+
+		val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
+		val |= BIT(0);
+		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL);
+	}
+
+	clk_ops_core = clk_dummy_ops;
+	clk_ops_core.set_rate = cpu_clk_set_rate;
+	clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
+
+	spin_lock_init(&l3_clk.lock);
+	spin_lock_init(&pwrcl_clk.lock);
+	spin_lock_init(&perfcl_clk.lock);
+
+	/* Register OSM l3, pwr and perf clocks with Clock Framework */
+	for (i = 0; i < num_clks; i++) {
+		clk = devm_clk_register(&pdev->dev, osm_qcom_clk_hws[i]);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "Unable to register CPU clock at index %d\n",
+				i);
+			return PTR_ERR(clk);
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+								clk_data);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to register CPU clocks\n");
+		goto provider_err;
+	}
+
+	get_online_cpus();
+
+	/* Enable OSM */
+	for_each_online_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			return -EINVAL;
+		}
+
+		rc = clk_set_rate(c->hw.clk, init_rate);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to set init rate on CPU %d, rc=%d\n",
+			cpu, rc);
+			goto provider_err;
+		}
+		WARN(clk_prepare_enable(c->hw.clk),
+		     "Failed to enable clock for cpu %d\n", cpu);
+		udelay(300);
+	}
+
+	rc = clk_set_rate(l3_clk.hw.clk, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
+			rc);
+		goto provider_err;
+	}
+	WARN(clk_prepare_enable(l3_clk.hw.clk),
+		     "Failed to enable clock for L3\n");
+	udelay(300);
+
+	populate_opp_table(pdev);
+
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	register_cpu_cycle_counter_cb(&cb);
+	pr_info("OSM driver initialized\n");
+	put_online_cpus();
+
+	return 0;
+provider_err:
+	if (clk_data)
+		devm_kfree(&pdev->dev, clk_data->clks);
+clk_err:
+	devm_kfree(&pdev->dev, clk_data);
+exit:
+	dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc);
+	panic("Unable to Setup OSM");
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,clk-cpu-osm" },
+	{}
+};
+
+static struct platform_driver clk_cpu_osm_driver = {
+	.probe = clk_cpu_osm_driver_probe,
+	.driver = {
+		.name = "clk-cpu-osm",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init clk_cpu_osm_init(void)
+{
+	return platform_driver_register(&clk_cpu_osm_driver);
+}
+arch_initcall(clk_cpu_osm_init);
+
+static void __exit clk_cpu_osm_exit(void)
+{
+	platform_driver_unregister(&clk_cpu_osm_driver);
+}
+module_exit(clk_cpu_osm_exit);
+
+MODULE_DESCRIPTION("QTI CPU clock driver for OSM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index eface18..e728dec 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -57,4 +57,8 @@
 extern int qcom_cc_probe(struct platform_device *pdev,
 			 const struct qcom_cc_desc *desc);
 extern struct clk_ops clk_dummy_ops;
+
+#define BM(msb, lsb)	(((((uint32_t)-1) << (31-(msb))) >> (31-(msb)+(lsb))) << (lsb))
+#define BVAL(msb, lsb, val)	(((val) << (lsb)) & BM((msb), (lsb)))
+
 #endif
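
The BM()/BVAL() helpers above build contiguous bit masks and shifted field values. A small
illustration of what they evaluate to, applied to a hypothetical register update (the
ioremapped handle 'base' is assumed, not part of this change):

	/*
	 * BM(7, 4)        == 0x000000f0   -- mask covering bits 7..4
	 * BVAL(7, 4, 0x3) == 0x00000030   -- value 0x3 placed into bits 7..4
	 */
	u32 regval;

	regval = readl_relaxed(base);		/* 'base' is a hypothetical register address */
	regval &= ~BM(7, 4);			/* clear the field */
	regval |= BVAL(7, 4, 0x3);		/* insert the new field value */
	writel_relaxed(regval, base);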
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
new file mode 100644
index 0000000..a5a7488
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -0,0 +1,739 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/clk.h>
+#include <linux/clk/qcom.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "clk-alpha-pll.h"
+#include "vdd-level-sdm845.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+#define F_SLEW(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) }
+
+static int vdd_gx_corner[] = {
+	RPMH_REGULATOR_LEVEL_OFF,		/* VDD_GX_NONE */
+	RPMH_REGULATOR_LEVEL_MIN_SVS,		/* VDD_GX_MIN */
+	RPMH_REGULATOR_LEVEL_LOW_SVS,		/* VDD_GX_LOWER */
+	RPMH_REGULATOR_LEVEL_SVS,		/* VDD_GX_LOW */
+	RPMH_REGULATOR_LEVEL_SVS_L1,		/* VDD_GX_LOW_L1 */
+	RPMH_REGULATOR_LEVEL_NOM,		/* VDD_GX_NOMINAL */
+	RPMH_REGULATOR_LEVEL_NOM_L1,		/* VDD_GX_NOMINAL_L1 */
+	RPMH_REGULATOR_LEVEL_TURBO,		/* VDD_GX_HIGH */
+	RPMH_REGULATOR_LEVEL_TURBO_L1,		/* VDD_GX_HIGH_L1 */
+	RPMH_REGULATOR_LEVEL_MAX,		/* VDD_GX_MAX */
+};
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_gfx, VDD_GX_NUM, 1, vdd_gx_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL0_OUT_MAIN_DIV,
+	P_GPU_CC_PLL0_OUT_EVEN,
+	P_GPU_CC_PLL0_OUT_MAIN,
+	P_GPU_CC_PLL0_OUT_ODD,
+	P_GPU_CC_PLL1_OUT_EVEN,
+	P_GPU_CC_PLL1_OUT_MAIN,
+	P_GPU_CC_PLL1_OUT_ODD,
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_OUT_MAIN, 1 },
+	{ P_GPU_CC_PLL1_OUT_MAIN, 3 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0",
+	"gpu_cc_pll1",
+	"gpll0",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPU_CC_PLL0_OUT_EVEN, 1 },
+	{ P_GPU_CC_PLL0_OUT_ODD, 2 },
+	{ P_GPU_CC_PLL1_OUT_EVEN, 3 },
+	{ P_GPU_CC_PLL1_OUT_ODD, 4 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"gpu_cc_pll0_out_even",
+	"gpu_cc_pll0_out_odd",
+	"gpu_cc_pll1_out_even",
+	"gpu_cc_pll1_out_odd",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_MAIN_DIV, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_2[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco fabia_vco[] = {
+	{ 250000000, 2000000000, 0 },
+	{ 125000000, 1000000000, 1 },
+};
+
+static const struct pll_config gpu_cc_pll0_config = {
+	.l = 0x1d,
+	.frac = 0x2aaa,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_MX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+	{ 0x0, 1 },
+	{ 0x1, 2 },
+	{ 0x3, 4 },
+	{ 0x7, 8 },
+	{},
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "gpu_cc_pll0" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+	F(400000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+	.cmd_rcgr = 0x1120,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = gpu_cc_parent_map_0,
+	.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gmu_clk_src",
+		.parent_names = gpu_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 200000000,
+			LOW, 400000000),
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+	F_SLEW(147000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  294000000),
+	F_SLEW(210000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  420000000),
+	F_SLEW(338000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  676000000),
+	F_SLEW(425000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0,  850000000),
+	F_SLEW(600000000, P_GPU_CC_PLL0_OUT_EVEN,  1, 0, 0, 1200000000),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+	.cmd_rcgr = 0x101c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = gpu_cc_parent_map_1,
+	.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_gx_gfx3d_clk_src",
+		.parent_names = gpu_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops =  &clk_rcg2_ops,
+		VDD_GX_FMAX_MAP8(
+			MIN, 147000000,
+			LOWER, 210000000,
+			LOW, 280000000,
+			LOW_L1, 338000000,
+			NOMINAL, 425000000,
+			NOMINAL_L1, 487000000,
+			HIGH, 548000000,
+			HIGH_L1, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_gpu_cc_rbcpr_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gpu_cc_rbcpr_clk_src = {
+	.cmd_rcgr = 0x10b0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gpu_cc_parent_map_2,
+	.freq_tbl = ftbl_gpu_cc_rbcpr_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpu_cc_rbcpr_clk_src",
+		.parent_names = gpu_cc_parent_names_2,
+		.num_parents = 4,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			NOMINAL, 50000000),
+	},
+};
+
+static struct clk_branch gpu_cc_acd_ahb_clk = {
+	.halt_reg = 0x1168,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1168,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_acd_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_acd_cxo_clk = {
+	.halt_reg = 0x1164,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1164,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_acd_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+	.halt_reg = 0x1078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+	.halt_reg = 0x107c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x107c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_crc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+	.halt_reg = 0x1088,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1088,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_apb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+	.halt_reg = 0x10a4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10a4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_slv_clk = {
+	.halt_reg = 0x10a8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gfx3d_slv_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+	.halt_reg = 0x1098,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1098,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_gmu_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gmu_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+	.halt_reg = 0x108c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x108c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cx_snoc_dvm_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+	.halt_reg = 0x1004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_aon_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+	.halt_reg = 0x109c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x109c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_debug_clk = {
+	.halt_reg = 0x1100,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1100,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_debug_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+	.halt_reg = 0x1060,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1060,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_cxo_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+	.halt_reg = 0x1054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_gfx3d_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gx_gfx3d_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+	.halt_reg = 0x1064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_gmu_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_gmu_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+	.halt_reg = 0x1058,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x1058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_gx_vsense_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_pll_test_clk = {
+	.halt_reg = 0x110c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x110c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_pll_test_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_rbcpr_ahb_clk = {
+	.halt_reg = 0x10f4,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10f4,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_rbcpr_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gpu_cc_rbcpr_clk = {
+	.halt_reg = 0x10f0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x10f0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpu_cc_rbcpr_clk",
+			.parent_names = (const char *[]){
+				"gpu_cc_rbcpr_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *gpu_cc_sdm845_clocks[] = {
+	[GPU_CC_ACD_AHB_CLK] = &gpu_cc_acd_ahb_clk.clkr,
+	[GPU_CC_ACD_CXO_CLK] = &gpu_cc_acd_cxo_clk.clkr,
+	[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+	[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+	[GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+	[GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+	[GPU_CC_CX_GFX3D_SLV_CLK] = &gpu_cc_cx_gfx3d_slv_clk.clkr,
+	[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+	[GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+	[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+	[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+	[GPU_CC_DEBUG_CLK] = &gpu_cc_debug_clk.clkr,
+	[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+	[GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+	[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+	[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+	[GPU_CC_PLL_TEST_CLK] = &gpu_cc_pll_test_clk.clkr,
+	[GPU_CC_RBCPR_AHB_CLK] = &gpu_cc_rbcpr_ahb_clk.clkr,
+	[GPU_CC_RBCPR_CLK] = &gpu_cc_rbcpr_clk.clkr,
+	[GPU_CC_RBCPR_CLK_SRC] = &gpu_cc_rbcpr_clk_src.clkr,
+};
+
+static struct clk_regmap *gpu_cc_gfx_sdm845_clocks[] = {
+	[GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+	[GPU_CC_PLL0_OUT_EVEN] = &gpu_cc_pll0_out_even.clkr,
+	[GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+	[GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+};
+
+static const struct qcom_reset_map gpu_cc_sdm845_resets[] = {
+	[GPUCC_GPU_CC_ACD_BCR] = { 0x1160 },
+	[GPUCC_GPU_CC_CX_BCR] = { 0x1068 },
+	[GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
+	[GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
+	[GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
+	[GPUCC_GPU_CC_RBCPR_BCR] = { 0x10ac },
+	[GPUCC_GPU_CC_SPDM_BCR] = { 0x1110 },
+	[GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
+};
+
+static const struct regmap_config gpu_cc_sdm845_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x8008,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sdm845_desc = {
+	.config = &gpu_cc_sdm845_regmap_config,
+	.clks = gpu_cc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(gpu_cc_sdm845_clocks),
+	.resets = gpu_cc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(gpu_cc_sdm845_resets),
+};
+
+static const struct qcom_cc_desc gpu_cc_gfx_sdm845_desc = {
+	.config = &gpu_cc_sdm845_regmap_config,
+	.clks = gpu_cc_gfx_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(gpu_cc_gfx_sdm845_clocks),
+};
+
+static const struct of_device_id gpu_cc_sdm845_match_table[] = {
+	{ .compatible = "qcom,gpucc-sdm845" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sdm845_match_table);
+
+static const struct of_device_id gpu_cc_gfx_sdm845_match_table[] = {
+	{ .compatible = "qcom,gfxcc-sdm845" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_gfx_sdm845_match_table);
+
+static int gpu_cc_gfx_sdm845_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	struct resource *res;
+	void __iomem *base;
+	int ret = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Failed to get resources for clock_gfxcc.\n");
+		return -EINVAL;
+	}
+
+	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!base) {
+		dev_err(&pdev->dev, "Failed to ioremap the GFX CC base.\n");
+		return -ENOMEM;
+	}
+
+	regmap = devm_regmap_init_mmio(&pdev->dev, base,
+				gpu_cc_gfx_sdm845_desc.config);
+	if (IS_ERR(regmap)) {
+		dev_err(&pdev->dev, "Failed to init regmap\n");
+		return PTR_ERR(regmap);
+	}
+
+	/* Get MX voltage regulator for GPU PLL graphic clock. */
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	/* GFX voltage regulators for GFX3D  graphic clock. */
+	vdd_gfx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_gfx");
+	if (IS_ERR(vdd_gfx.regulator[0])) {
+		if (PTR_ERR(vdd_gfx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_gfx regulator\n");
+		return PTR_ERR(vdd_gfx.regulator[0]);
+	}
+
+	clk_fabia_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+
+	ret = qcom_cc_really_probe(pdev, &gpu_cc_gfx_sdm845_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GFX CC clocks\n");
+		return ret;
+	}
+
+	clk_prepare_enable(gpu_cc_cxo_clk.clkr.hw.clk);
+
+	dev_info(&pdev->dev, "Registered GFX CC clocks.\n");
+
+	return ret;
+}
+
+static struct platform_driver gpu_cc_gfx_sdm845_driver = {
+	.probe = gpu_cc_gfx_sdm845_probe,
+	.driver = {
+		.name = "gfxcc-sdm845",
+		.of_match_table = gpu_cc_gfx_sdm845_match_table,
+	},
+};
+
+static int __init gpu_cc_gfx_sdm845_init(void)
+{
+	return platform_driver_register(&gpu_cc_gfx_sdm845_driver);
+}
+arch_initcall(gpu_cc_gfx_sdm845_init);
+
+static void __exit gpu_cc_gfx_sdm845_exit(void)
+{
+	platform_driver_unregister(&gpu_cc_gfx_sdm845_driver);
+}
+module_exit(gpu_cc_gfx_sdm845_exit);
+
+static int gpu_cc_sdm845_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret = 0;
+
+	regmap = qcom_cc_map(pdev, &gpu_cc_sdm845_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* Get CX voltage regulator for CX and GMU clocks. */
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	ret = qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GPU CC clocks.\n");
+
+	return ret;
+}
+
+static struct platform_driver gpu_cc_sdm845_driver = {
+	.probe = gpu_cc_sdm845_probe,
+	.driver = {
+		.name = "gpu_cc-sdm845",
+		.of_match_table = gpu_cc_sdm845_match_table,
+	},
+};
+
+static int __init gpu_cc_sdm845_init(void)
+{
+	return platform_driver_register(&gpu_cc_sdm845_driver);
+}
+core_initcall(gpu_cc_sdm845_init);
+
+static void __exit gpu_cc_sdm845_exit(void)
+{
+	platform_driver_unregister(&gpu_cc_sdm845_driver);
+}
+module_exit(gpu_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gpu_cc-sdm845");
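
Because gpu_cc_gx_gfx3d_clk, its RCG and the PLL post-divider all carry CLK_SET_RATE_PARENT,
a rate request on the leaf branch propagates up to gpu_cc_gx_gfx3d_clk_src and from there to
gpu_cc_pll0. A hedged consumer-side sketch of that propagation (the connection-id "core_clk"
is an assumption; the GPU driver's actual plumbing is outside this patch):

	static int example_set_gfx_rate(struct device *dev)
	{
		/* The request walks gpu_cc_gx_gfx3d_clk -> gpu_cc_gx_gfx3d_clk_src ->
		 * gpu_cc_pll0 post-divider via CLK_SET_RATE_PARENT at each level.
		 */
		struct clk *gfx3d = devm_clk_get(dev, "core_clk");	/* name is hypothetical */

		if (IS_ERR(gfx3d))
			return PTR_ERR(gfx3d);

		clk_set_rate(gfx3d, 425000000);	/* a row of ftbl_gpu_cc_gx_gfx3d_clk_src */
		return clk_prepare_enable(gfx3d);
	}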
diff --git a/drivers/clk/qcom/vdd-level-sdm845.h b/drivers/clk/qcom/vdd-level-sdm845.h
index 1771c15..6f876ba 100644
--- a/drivers/clk/qcom/vdd-level-sdm845.h
+++ b/drivers/clk/qcom/vdd-level-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -98,6 +98,31 @@
 	},					\
 	.num_rate_max = VDD_CX_NUM
 
+#define VDD_MX_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
+	.vdd_class = &vdd_mx,			\
+	.rate_max = (unsigned long[VDD_CX_NUM]) {	\
+		[VDD_CX_##l1] = (f1),		\
+		[VDD_CX_##l2] = (f2),		\
+		[VDD_CX_##l3] = (f3),		\
+		[VDD_CX_##l4] = (f4),		\
+	},					\
+	.num_rate_max = VDD_CX_NUM
+
+#define VDD_GX_FMAX_MAP8(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6, \
+				l7, f7, l8, f8) \
+	.vdd_class = &vdd_gfx,			\
+	.rate_max = (unsigned long[VDD_GX_NUM]) {	\
+		[VDD_GX_##l1] = (f1),		\
+		[VDD_GX_##l2] = (f2),		\
+		[VDD_GX_##l3] = (f3),		\
+		[VDD_GX_##l4] = (f4),		\
+		[VDD_GX_##l5] = (f5),		\
+		[VDD_GX_##l6] = (f6),		\
+		[VDD_GX_##l7] = (f7),		\
+		[VDD_GX_##l8] = (f8),		\
+	},					\
+	.num_rate_max = VDD_GX_NUM
+
 enum vdd_cx_levels {
 	VDD_CX_NONE,
 	VDD_CX_MIN,		/* MIN SVS */
@@ -109,6 +134,19 @@
 	VDD_CX_NUM,
 };
 
+enum vdd_gx_levels {
+	VDD_GX_NONE,
+	VDD_GX_MIN,		/* MIN SVS */
+	VDD_GX_LOWER,		/* SVS2 */
+	VDD_GX_LOW,		/* SVS */
+	VDD_GX_LOW_L1,		/* SVSL1 */
+	VDD_GX_NOMINAL,		/* NOM */
+	VDD_GX_NOMINAL_L1,		/* NOM1 */
+	VDD_GX_HIGH,		/* TURBO */
+	VDD_GX_HIGH_L1,		/* TURBO1 */
+	VDD_GX_NUM,
+};
+
 /* Need to use the correct VI/VL mappings */
 static int vdd_corner[] = {
 	RPMH_REGULATOR_LEVEL_OFF,		/* VDD_CX_NONE */
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index fc75a33..8ca07fe 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -608,7 +608,7 @@
 				 0x150, 0, 4, 24, 2, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
 
 static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
 
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index ebb1b31..ee78104 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -85,6 +85,10 @@
 	unsigned int m, p;
 	u32 reg;
 
+	/* Adjust parent_rate according to pre-dividers */
+	ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+						-1, &parent_rate);
+
 	reg = readl(cmp->common.base + cmp->common.reg);
 
 	m = reg >> cmp->m.shift;
@@ -114,6 +118,10 @@
 	unsigned int m, p;
 	u32 reg;
 
+	/* Adjust parent_rate according to pre-dividers */
+	ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
+						-1, &parent_rate);
+
 	max_m = cmp->m.max ?: 1 << cmp->m.width;
 	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f7e1c1b..66e604e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -752,9 +752,11 @@
 					char *buf)
 {
 	unsigned int cur_freq = __cpufreq_get(policy);
-	if (!cur_freq)
-		return sprintf(buf, "<unknown>");
-	return sprintf(buf, "%u\n", cur_freq);
+
+	if (cur_freq)
+		return sprintf(buf, "%u\n", cur_freq);
+
+	return sprintf(buf, "<unknown>\n");
 }
 
 /**
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 1b8c739..12eb6d8 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -35,8 +35,12 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/cpufreq_interactive.h>
 
+static DEFINE_PER_CPU(struct update_util_data, update_util);
+
 struct cpufreq_interactive_policyinfo {
-	struct timer_list policy_timer;
+	bool work_in_progress;
+	struct irq_work irq_work;
+	spinlock_t irq_work_lock; /* protects work_in_progress */
 	struct timer_list policy_slack_timer;
 	struct hrtimer notif_timer;
 	spinlock_t load_lock; /* protects load tracking stat */
@@ -215,9 +219,6 @@
 			pcpu->cputime_speedadj_timestamp =
 						pcpu->time_in_idle_timestamp;
 		}
-		del_timer(&ppol->policy_timer);
-		ppol->policy_timer.expires = expires;
-		add_timer(&ppol->policy_timer);
 	}
 
 	if (tunables->timer_slack_val >= 0 &&
@@ -231,9 +232,51 @@
 	spin_unlock_irqrestore(&ppol->load_lock, flags);
 }
 
+static void update_util_handler(struct update_util_data *data, u64 time,
+				unsigned int sched_flags)
+{
+	struct cpufreq_interactive_policyinfo *ppol;
+	unsigned long flags;
+
+	ppol = *this_cpu_ptr(&polinfo);
+	spin_lock_irqsave(&ppol->irq_work_lock, flags);
+	/*
+	 * The irq-work may not be allowed to be queued up right now
+	 * because work has already been queued up or is in progress.
+	 */
+	if (ppol->work_in_progress ||
+	    sched_flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)
+		goto out;
+
+	ppol->work_in_progress = true;
+	irq_work_queue(&ppol->irq_work);
+out:
+	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+	int i;
+
+	for_each_cpu(i, policy->cpus)
+		cpufreq_remove_update_util_hook(i);
+
+	synchronize_sched();
+}
+
+static void gov_set_update_util(struct cpufreq_policy *policy)
+{
+	struct update_util_data *util;
+	int cpu;
+
+	for_each_cpu(cpu, policy->cpus) {
+		util = &per_cpu(update_util, cpu);
+		cpufreq_add_update_util_hook(cpu, util, update_util_handler);
+	}
+}
+
 /* The caller shall take enable_sem write semaphore to avoid any timer race.
- * The policy_timer and policy_slack_timer must be deactivated when calling
- * this function.
+ * The policy_slack_timer must be deactivated when calling this function.
  */
 static void cpufreq_interactive_timer_start(
 	struct cpufreq_interactive_tunables *tunables, int cpu)
@@ -245,8 +288,7 @@
 	int i;
 
 	spin_lock_irqsave(&ppol->load_lock, flags);
-	ppol->policy_timer.expires = expires;
-	add_timer(&ppol->policy_timer);
+	gov_set_update_util(ppol->policy);
 	if (tunables->timer_slack_val >= 0 &&
 	    ppol->target_freq > ppol->policy->min) {
 		expires += usecs_to_jiffies(tunables->timer_slack_val);
@@ -265,6 +307,7 @@
 	spin_unlock_irqrestore(&ppol->load_lock, flags);
 }
 
+
 static unsigned int freq_to_above_hispeed_delay(
 	struct cpufreq_interactive_tunables *tunables,
 	unsigned int freq)
@@ -448,7 +491,7 @@
 
 #define NEW_TASK_RATIO 75
 #define PRED_TOLERANCE_PCT 10
-static void cpufreq_interactive_timer(unsigned long data)
+static void cpufreq_interactive_timer(int data)
 {
 	s64 now;
 	unsigned int delta_time;
@@ -467,7 +510,7 @@
 	unsigned int index;
 	unsigned long flags;
 	unsigned long max_cpu;
-	int cpu, i;
+	int i, cpu;
 	int new_load_pct = 0;
 	int prev_l, pred_l = 0;
 	struct cpufreq_govinfo govinfo;
@@ -659,8 +702,7 @@
 	wake_up_process_no_notif(speedchange_task);
 
 rearm:
-	if (!timer_pending(&ppol->policy_timer))
-		cpufreq_interactive_timer_resched(data, false);
+	cpufreq_interactive_timer_resched(data, false);
 
 	/*
 	 * Send govinfo notification.
@@ -822,7 +864,6 @@
 	}
 	cpu = ppol->notif_cpu;
 	trace_cpufreq_interactive_load_change(cpu);
-	del_timer(&ppol->policy_timer);
 	del_timer(&ppol->policy_slack_timer);
 	cpufreq_interactive_timer(cpu);
 
@@ -1569,6 +1610,20 @@
 	return tunables;
 }
 
+static void irq_work(struct irq_work *irq_work)
+{
+	struct cpufreq_interactive_policyinfo *ppol;
+	unsigned long flags;
+
+	ppol = container_of(irq_work, struct cpufreq_interactive_policyinfo,
+			    irq_work);
+
+	cpufreq_interactive_timer(smp_processor_id());
+	spin_lock_irqsave(&ppol->irq_work_lock, flags);
+	ppol->work_in_progress = false;
+	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
+}
+
 static struct cpufreq_interactive_policyinfo *get_policyinfo(
 					struct cpufreq_policy *policy)
 {
@@ -1593,12 +1648,12 @@
 	}
 	ppol->sl = sl;
 
-	init_timer_deferrable(&ppol->policy_timer);
-	ppol->policy_timer.function = cpufreq_interactive_timer;
 	init_timer(&ppol->policy_slack_timer);
 	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
 	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
+	init_irq_work(&ppol->irq_work, irq_work);
+	spin_lock_init(&ppol->irq_work_lock);
 	spin_lock_init(&ppol->load_lock);
 	spin_lock_init(&ppol->target_freq_lock);
 	init_rwsem(&ppol->enable_sem);
@@ -1775,9 +1830,7 @@
 	ppol->reject_notification = true;
 	ppol->notif_pending = false;
 	down_write(&ppol->enable_sem);
-	del_timer_sync(&ppol->policy_timer);
 	del_timer_sync(&ppol->policy_slack_timer);
-	ppol->policy_timer.data = policy->cpu;
 	ppol->last_evaluated_jiffy = get_jiffies_64();
 	cpufreq_interactive_timer_start(tunables, policy->cpu);
 	ppol->governor_enabled = 1;
@@ -1807,7 +1860,9 @@
 	down_write(&ppol->enable_sem);
 	ppol->governor_enabled = 0;
 	ppol->target_freq = 0;
-	del_timer_sync(&ppol->policy_timer);
+	gov_clear_update_util(ppol->policy);
+	irq_work_sync(&ppol->irq_work);
+	ppol->work_in_progress = false;
 	del_timer_sync(&ppol->policy_slack_timer);
 	up_write(&ppol->enable_sem);
 	ppol->reject_notification = false;
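
The governor change above drops the deferrable policy timer in favour of scheduler
update_util callbacks that defer the real work to an irq_work. Stripped of the governor
specifics, the general shape of that pattern looks roughly like the sketch below; all
example_* names are hypothetical.

	/* Sketch of the update_util + irq_work pattern (names are illustrative). */
	static DEFINE_PER_CPU(struct update_util_data, example_hook);
	static struct irq_work example_work;

	static void example_irq_work_fn(struct irq_work *w)
	{
		/* Runs in hard-IRQ context: re-evaluate the target frequency here. */
	}

	static void example_util_handler(struct update_util_data *data, u64 time,
					 unsigned int flags)
	{
		/* Called from scheduler hot paths: keep it short, defer to irq_work. */
		irq_work_queue(&example_work);
	}

	static void example_start(struct cpufreq_policy *policy)
	{
		int cpu;

		init_irq_work(&example_work, example_irq_work_fn);
		for_each_cpu(cpu, policy->cpus)
			cpufreq_add_update_util_hook(cpu, &per_cpu(example_hook, cpu),
						     example_util_handler);
	}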
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index 0caa8d1..f968ffd9 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -364,7 +364,7 @@
 	char clk_name[] = "cpu??_clk";
 	char tbl_name[] = "qcom,cpufreq-table-??";
 	struct clk *c;
-	int cpu;
+	int cpu, ret;
 	struct cpufreq_frequency_table *ftbl;
 
 	l2_clk = devm_clk_get(dev, "l2_clk");
@@ -431,7 +431,15 @@
 		per_cpu(freq_table, cpu) = ftbl;
 	}
 
-	return 0;
+	ret = register_pm_notifier(&msm_cpufreq_pm_notifier);
+	if (ret)
+		return ret;
+
+	ret = cpufreq_register_driver(&msm_cpufreq_driver);
+	if (ret)
+		unregister_pm_notifier(&msm_cpufreq_pm_notifier);
+
+	return ret;
 }
 
 static const struct of_device_id msm_cpufreq_match_table[] = {
@@ -467,8 +475,7 @@
 		return rc;
 	}
 
-	register_pm_notifier(&msm_cpufreq_pm_notifier);
-	return cpufreq_register_driver(&msm_cpufreq_driver);
+	return 0;
 }
 
 subsys_initcall(msm_cpufreq_register);
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cafa633..f796e36 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -283,11 +283,14 @@
  */
 int ccp_enqueue_cmd(struct ccp_cmd *cmd)
 {
-	struct ccp_device *ccp = ccp_get_device();
+	struct ccp_device *ccp;
 	unsigned long flags;
 	unsigned int i;
 	int ret;
 
+	/* Some commands might need to be sent to a specific device */
+	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
+
 	if (!ccp)
 		return -ENODEV;
 
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index e5d9278..8d0eeb4 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -390,6 +390,7 @@
 			goto err;
 
 		ccp_cmd = &cmd->ccp_cmd;
+		ccp_cmd->ccp = chan->ccp;
 		ccp_pt = &ccp_cmd->u.passthru_nomap;
 		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
 		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 286447a..152552d 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -334,6 +334,7 @@
 	int rc = VM_FAULT_SIGBUS;
 	phys_addr_t phys;
 	pfn_t pfn;
+	unsigned int fault_size = PAGE_SIZE;
 
 	if (check_vma(dax_dev, vma, __func__))
 		return VM_FAULT_SIGBUS;
@@ -344,6 +345,9 @@
 		return VM_FAULT_SIGBUS;
 	}
 
+	if (fault_size != dax_region->align)
+		return VM_FAULT_SIGBUS;
+
 	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
 	if (phys == -1) {
 		dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
@@ -389,6 +393,7 @@
 	phys_addr_t phys;
 	pgoff_t pgoff;
 	pfn_t pfn;
+	unsigned int fault_size = PMD_SIZE;
 
 	if (check_vma(dax_dev, vma, __func__))
 		return VM_FAULT_SIGBUS;
@@ -405,6 +410,16 @@
 		return VM_FAULT_SIGBUS;
 	}
 
+	if (fault_size < dax_region->align)
+		return VM_FAULT_SIGBUS;
+	else if (fault_size > dax_region->align)
+		return VM_FAULT_FALLBACK;
+
+	/* if we are outside of the VMA */
+	if (pmd_addr < vma->vm_start ||
+			(pmd_addr + PMD_SIZE) > vma->vm_end)
+		return VM_FAULT_SIGBUS;
+
 	pgoff = linear_page_index(vma, pmd_addr);
 	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
 	if (phys == -1) {
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 0fb63e9..ed83185 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,7 +34,7 @@
 
 enum ev_index {
 	INST_IDX,
-	L2DM_IDX,
+	CM_IDX,
 	CYC_IDX,
 	NUM_EVENTS
 };
@@ -51,6 +51,8 @@
 	struct event_data events[NUM_EVENTS];
 	ktime_t prev_ts;
 	bool init_pending;
+	unsigned long cache_miss_event;
+	unsigned long inst_event;
 };
 static DEFINE_PER_CPU(struct memlat_hwmon_data, pm_data);
 
@@ -111,7 +113,7 @@
 			read_event(&hw_data->events[INST_IDX]);
 
 	hw->core_stats[cpu_idx].mem_count =
-			read_event(&hw_data->events[L2DM_IDX]);
+			read_event(&hw_data->events[CM_IDX]);
 
 	cyc_cnt = read_event(&hw_data->events[CYC_IDX]);
 	hw->core_stats[cpu_idx].freq = compute_freq(hw_data, cyc_cnt);
@@ -192,19 +194,19 @@
 	if (IS_ERR(attr))
 		return PTR_ERR(attr);
 
-	attr->config = INST_EV;
+	attr->config = hw_data->inst_event;
 	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
 	if (IS_ERR(pevent))
 		goto err_out;
 	hw_data->events[INST_IDX].pevent = pevent;
 	perf_event_enable(hw_data->events[INST_IDX].pevent);
 
-	attr->config = L2DM_EV;
+	attr->config = hw_data->cache_miss_event;
 	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
 	if (IS_ERR(pevent))
 		goto err_out;
-	hw_data->events[L2DM_IDX].pevent = pevent;
-	perf_event_enable(hw_data->events[L2DM_IDX].pevent);
+	hw_data->events[CM_IDX].pevent = pevent;
+	perf_event_enable(hw_data->events[CM_IDX].pevent);
 
 	attr->config = CYC_EV;
 	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
@@ -300,6 +302,7 @@
 	struct memlat_hwmon *hw;
 	struct cpu_grp_info *cpu_grp;
 	int cpu, ret;
+	u32 cachemiss_ev, inst_ev;
 
 	cpu_grp = devm_kzalloc(dev, sizeof(*cpu_grp), GFP_KERNEL);
 	if (!cpu_grp)
@@ -325,8 +328,26 @@
 	if (!hw->core_stats)
 		return -ENOMEM;
 
-	for_each_cpu(cpu, &cpu_grp->cpus)
+	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
+			&cachemiss_ev);
+	if (ret) {
+		dev_dbg(dev, "Cache Miss event not specified. Using def:0x%x\n",
+				L2DM_EV);
+		cachemiss_ev = L2DM_EV;
+	}
+
+	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &inst_ev);
+	if (ret) {
+		dev_dbg(dev, "Inst event not specified. Using def:0x%x\n",
+				INST_EV);
+		inst_ev = INST_EV;
+	}
+
+	for_each_cpu(cpu, &cpu_grp->cpus) {
 		hw->core_stats[cpu - cpumask_first(&cpu_grp->cpus)].id = cpu;
+		(&per_cpu(pm_data, cpu))->cache_miss_event = cachemiss_ev;
+		(&per_cpu(pm_data, cpu))->inst_event = inst_ev;
+	}
 
 	hw->start_hwmon = &start_hwmon;
 	hw->stop_hwmon = &stop_hwmon;
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index 39090dc..0c9e428 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -215,7 +215,7 @@
 							esoc_clink->name);
 				return -EIO;
 			}
-			put_user(req, (unsigned long __user *)uarg);
+			put_user(req, (unsigned int __user *)uarg);
 
 		}
 		return err;
@@ -227,7 +227,7 @@
 		err = clink_ops->get_status(&status, esoc_clink);
 		if (err)
 			return err;
-		put_user(status, (unsigned long __user *)uarg);
+		put_user(status, (unsigned int __user *)uarg);
 		break;
 	case ESOC_WAIT_FOR_CRASH:
 		err = wait_event_interruptible(esoc_udev->evt_wait,
@@ -241,7 +241,7 @@
 							esoc_clink->name);
 				return -EIO;
 			}
-			put_user(evt, (unsigned long __user *)uarg);
+			put_user(evt, (unsigned int __user *)uarg);
 		}
 		return err;
 	default:
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index b447a01..6f3c891 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3506,6 +3506,16 @@
 			max_sclk = 75000;
 			max_mclk = 80000;
 		}
+	} else if (adev->asic_type == CHIP_OLAND) {
+		if ((adev->pdev->revision == 0xC7) ||
+		    (adev->pdev->revision == 0x80) ||
+		    (adev->pdev->revision == 0x81) ||
+		    (adev->pdev->revision == 0x83) ||
+		    (adev->pdev->revision == 0x87) ||
+		    (adev->pdev->device == 0x6604) ||
+		    (adev->pdev->device == 0x6605)) {
+			max_sclk = 75000;
+		}
 	}
 	/* Apply dpm quirks */
 	while (p && p->chip_device != 0) {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6e0447f..72ec93d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@
 
 	pm_runtime_enable(dev);
 
+	pm_runtime_get_sync(dev);
 	phy_power_on(dp->phy);
 
 	analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@
 		goto err_disable_pm_runtime;
 	}
 
+	phy_power_off(dp->phy);
+	pm_runtime_put(dev);
+
 	return 0;
 
 err_disable_pm_runtime:
+
+	phy_power_off(dp->phy);
+	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 
 	return ret;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2e42a05..50acd79 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1382,6 +1382,15 @@
 	return ret < 0 ? ret : 0;
 }
 
+static void release_crtc_commit(struct completion *completion)
+{
+	struct drm_crtc_commit *commit = container_of(completion,
+						      typeof(*commit),
+						      flip_done);
+
+	drm_crtc_commit_put(commit);
+}
+
 /**
  * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
  * @state: new modeset state to be committed
@@ -1474,6 +1483,8 @@
 		}
 
 		crtc_state->event->base.completion = &commit->flip_done;
+		crtc_state->event->base.completion_release = release_crtc_commit;
+		drm_crtc_commit_get(commit);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e84faec..f5815e1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -686,8 +686,8 @@
 	assert_spin_locked(&dev->event_lock);
 
 	if (e->completion) {
-		/* ->completion might disappear as soon as it signalled. */
 		complete_all(e->completion);
+		e->completion_release(e->completion);
 		e->completion = NULL;
 	}
 
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 71000d5..70b47ca 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -113,3 +113,13 @@
           driver during fatal errors and enable some display-driver logging
           into an internal buffer (this avoids logging overhead).
 
+config DRM_SDE_RSC
+	bool "Enable SDE resource state coordinator (RSC) driver"
+	depends on DRM_MSM
+	help
+          The SDE DRM RSC provides display Resource State Coordinator support
+          used to vote the AB/IB bandwidth for the primary display. Each RSC
+          client can vote its active state. Any active request from any client
+          prevents display core power collapse. A client can also register for
+          display core power collapse events on the RSC.
+
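
A client obtains a handle to the coordinator through sde_rsc_client_create(), as
sde_encoder.c does later in this patch. A minimal hedged sketch (the display-name string
is illustrative; subsequent state votes go through the sde_rsc client API and are not
shown here):

	/* Illustrative RSC client creation, mirroring the sde_encoder.c call site. */
	struct sde_rsc_client *rsc_client;

	rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, "primary_disp",
					   true /* is_primary */);
	if (IS_ERR_OR_NULL(rsc_client))
		rsc_client = NULL;	/* RSC support is optional; degrade gracefully */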
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index d9a49f8..b5d78b1 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -50,10 +50,11 @@
 	sde_io_util.o \
 	sde/sde_hw_reg_dma_v1_color_proc.o \
 	sde/sde_hw_color_proc_v4.o \
-	sde_rsc.o \
-	sde_rsc_hw.o \
 	sde/sde_hw_ad4.o \
 
+msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
+	sde_rsc_hw.o \
+
 # use drm gpu driver only if qcom_kgsl driver not available
 ifneq ($(CONFIG_QCOM_KGSL),y)
 msm_drm-y += adreno/adreno_device.o \
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index 0e2e7ec..2a84a2d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/types.h>
 #include <linux/clk.h>
+#include "sde_power_handle.h"
 
 #define MAX_STRING_LEN 32
 #define MAX_DSI_CTRL 2
@@ -67,6 +68,8 @@
  * @core_mmss_clk:       Handle to MMSS core clock.
  * @bus_clk:             Handle to bus clock.
  * @mnoc_clk:            Handle to MMSS NOC clock.
+ * @dsi_core_client:	 Pointer to SDE power client
+ * @phandle:             Pointer to SDE power handle
  */
 struct dsi_core_clk_info {
 	struct clk *mdp_core_clk;
@@ -74,6 +77,8 @@
 	struct clk *core_mmss_clk;
 	struct clk *bus_clk;
 	struct clk *mnoc_clk;
+	struct sde_power_client *dsi_core_client;
+	struct sde_power_handle *phandle;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 9650a0b..2fcf10ba 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -185,10 +185,12 @@
 {
 	int rc = 0;
 
-	rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
-	if (rc) {
-		pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
-		goto error;
+	if (c_clks->clks.mdp_core_clk) {
+		rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
+		if (rc) {
+			pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
+			goto error;
+		}
 	}
 
 	if (c_clks->clks.mnoc_clk) {
@@ -199,28 +201,36 @@
 		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.iface_clk);
-	if (rc) {
-		pr_err("failed to enable iface_clk, rc=%d\n", rc);
-		goto error_disable_mnoc_clk;
+	if (c_clks->clks.iface_clk) {
+		rc = clk_prepare_enable(c_clks->clks.iface_clk);
+		if (rc) {
+			pr_err("failed to enable iface_clk, rc=%d\n", rc);
+			goto error_disable_mnoc_clk;
+		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.bus_clk);
-	if (rc) {
-		pr_err("failed to enable bus_clk, rc=%d\n", rc);
-		goto error_disable_iface_clk;
+	if (c_clks->clks.bus_clk) {
+		rc = clk_prepare_enable(c_clks->clks.bus_clk);
+		if (rc) {
+			pr_err("failed to enable bus_clk, rc=%d\n", rc);
+			goto error_disable_iface_clk;
+		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.core_mmss_clk);
-	if (rc) {
-		pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
-		goto error_disable_bus_clk;
+	if (c_clks->clks.core_mmss_clk) {
+		rc = clk_prepare_enable(c_clks->clks.core_mmss_clk);
+		if (rc) {
+			pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
+			goto error_disable_bus_clk;
+		}
 	}
 
-	rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1);
-	if (rc) {
-		pr_err("bus scale client enable failed, rc=%d\n", rc);
-		goto error_disable_mmss_clk;
+	if (c_clks->bus_handle) {
+		rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1);
+		if (rc) {
+			pr_err("bus scale client enable failed, rc=%d\n", rc);
+			goto error_disable_mmss_clk;
+		}
 	}
 
 	return rc;
@@ -458,11 +468,18 @@
 	 */
 
 	m_clks = &clks[master_ndx];
+	rc = sde_power_resource_enable(m_clks->clks.phandle,
+			m_clks->clks.dsi_core_client, true);
+
+	if (rc) {
+		pr_err("Power resource enable failed, rc=%d\n", rc);
+		goto error;
+	}
 
 	rc = dsi_core_clk_start(m_clks);
 	if (rc) {
 		pr_err("failed to turn on master clocks, rc=%d\n", rc);
-		goto error;
+		goto error_disable_master_resource;
 	}
 
 	/* Turn on rest of the core clocks */
@@ -471,15 +488,28 @@
 		if (!clk || (clk == m_clks))
 			continue;
 
+		rc = sde_power_resource_enable(clk->clks.phandle,
+				clk->clks.dsi_core_client, true);
+		if (rc) {
+			pr_err("Power resource enable failed, rc=%d\n", rc);
+			goto error_disable_master;
+		}
+
 		rc = dsi_core_clk_start(clk);
 		if (rc) {
 			pr_err("failed to turn on clocks, rc=%d\n", rc);
+			(void)sde_power_resource_enable(clk->clks.phandle,
+					clk->clks.dsi_core_client, false);
 			goto error_disable_master;
 		}
 	}
 	return rc;
 error_disable_master:
 	(void)dsi_core_clk_stop(m_clks);
+
+error_disable_master_resource:
+	(void)sde_power_resource_enable(m_clks->clks.phandle,
+				m_clks->clks.dsi_core_client, false);
 error:
 	return rc;
 }
@@ -547,14 +577,30 @@
 			continue;
 
 		rc = dsi_core_clk_stop(clk);
-		if (rc)
-			pr_err("failed to turn off clocks, rc=%d\n", rc);
+		if (rc) {
+			pr_err("failed to turn off clocks, rc=%d\n", rc);
+			goto error;
+		}
+
+		rc = sde_power_resource_enable(clk->clks.phandle,
+				clk->clks.dsi_core_client, false);
+		if (rc) {
+			pr_err("Power resource disable failed: %d\n", rc);
+			goto error;
+		}
 	}
 
 	rc = dsi_core_clk_stop(m_clks);
-	if (rc)
+	if (rc) {
 		pr_err("failed to turn off master clocks, rc=%d\n", rc);
+		goto error;
+	}
 
+	rc = sde_power_resource_enable(m_clks->clks.phandle,
+				m_clks->clks.dsi_core_client, false);
+	if (rc)
+		pr_err("Power resource disable failed: %d\n", rc);
+error:
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index cd851bc..5df48c3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -490,30 +490,26 @@
 
 	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
 	if (IS_ERR(core->mdp_core_clk)) {
-		rc = PTR_ERR(core->mdp_core_clk);
-		pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
-		goto fail;
+		core->mdp_core_clk = NULL;
+		pr_debug("failed to get mdp_core_clk, rc=%d\n", rc);
 	}
 
 	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
 	if (IS_ERR(core->iface_clk)) {
-		rc = PTR_ERR(core->iface_clk);
-		pr_err("failed to get iface_clk, rc=%d\n", rc);
-		goto fail;
+		core->iface_clk = NULL;
+		pr_debug("failed to get iface_clk, rc=%d\n", rc);
 	}
 
 	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
 	if (IS_ERR(core->core_mmss_clk)) {
-		rc = PTR_ERR(core->core_mmss_clk);
-		pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
-		goto fail;
+		core->core_mmss_clk = NULL;
+		pr_debug("failed to get core_mmss_clk, rc=%d\n", rc);
 	}
 
 	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
 	if (IS_ERR(core->bus_clk)) {
-		rc = PTR_ERR(core->bus_clk);
-		pr_err("failed to get bus_clk, rc=%d\n", rc);
-		goto fail;
+		core->bus_clk = NULL;
+		pr_debug("failed to get bus_clk, rc=%d\n", rc);
 	}
 
 	core->mnoc_clk = devm_clk_get(&pdev->dev, "mnoc_clk");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index ddf791c..bcaf428 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -2228,10 +2228,12 @@
 	struct dsi_display *display;
 	struct dsi_clk_info info;
 	struct clk_ctrl_cb clk_cb;
+	struct msm_drm_private *priv;
 	void *handle = NULL;
 	struct platform_device *pdev = to_platform_device(dev);
 	char *client1 = "dsi_clk_client";
 	char *client2 = "mdp_event_client";
+	char dsi_client_name[DSI_CLIENT_NAME_SIZE];
 	int i, rc = 0;
 
 	if (!dev || !pdev || !master) {
@@ -2247,6 +2249,7 @@
 				drm, display);
 		return -EINVAL;
 	}
+	priv = drm->dev_private;
 
 	mutex_lock(&display->display_lock);
 
@@ -2260,7 +2263,6 @@
 
 	for (i = 0; i < display->ctrl_count; i++) {
 		display_ctrl = &display->ctrl[i];
-
 		rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
 		if (rc) {
 			pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
@@ -2280,9 +2282,20 @@
 			sizeof(struct dsi_core_clk_info));
 		memcpy(&info.l_clks[i], &display_ctrl->ctrl->clk_info.link_clks,
 			sizeof(struct dsi_link_clk_info));
+		info.c_clks[i].phandle = &priv->phandle;
 		info.bus_handle[i] =
 			display_ctrl->ctrl->axi_bus_info.bus_handle;
 		info.ctrl_index[i] = display_ctrl->ctrl->cell_index;
+		snprintf(dsi_client_name, DSI_CLIENT_NAME_SIZE,
+						"dsi_core_client%u", i);
+		info.c_clks[i].dsi_core_client = sde_power_client_create(
+				info.c_clks[i].phandle, dsi_client_name);
+		if (IS_ERR_OR_NULL(info.c_clks[i].dsi_core_client)) {
+			pr_err("[%s] client creation failed for ctrl[%d]",
+					dsi_client_name, i);
+			goto error_ctrl_deinit;
+		}
 	}
 
 	info.pre_clkoff_cb = dsi_pre_clkoff_cb;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 89bba96..cfbb14ec 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -29,7 +29,7 @@
 #include "dsi_panel.h"
 
 #define MAX_DSI_CTRLS_PER_DISPLAY             2
-
+#define DSI_CLIENT_NAME_SIZE		20
 /*
  * DSI Validate Mode modifiers
  * @DSI_VALIDATE_FLAG_ALLOW_ADJUST:	Allow mode validation to also do fixup
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a01383b..a904dcd 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -134,6 +134,10 @@
 	CRTC_PROP_CORE_CLK,
 	CRTC_PROP_CORE_AB,
 	CRTC_PROP_CORE_IB,
+	CRTC_PROP_MEM_AB,
+	CRTC_PROP_MEM_IB,
+	CRTC_PROP_ROT_PREFILL_BW,
+	CRTC_PROP_ROT_CLK,
 
 	/* total # of properties */
 	CRTC_PROP_COUNT
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 9caadca..5d4648e 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -399,9 +399,12 @@
 		/* convert fb val to drm framebuffer and prepare it */
 		c_state->out_fb =
 			drm_framebuffer_lookup(connector->dev, val);
-		if (!c_state->out_fb) {
+		if (!c_state->out_fb && val) {
 			SDE_ERROR("failed to look up fb %lld\n", val);
 			rc = -EFAULT;
+		} else if (!c_state->out_fb && !val) {
+			SDE_DEBUG("cleared fb_id\n");
+			rc = 0;
 		} else {
 			msm_framebuffer_set_kmap(c_state->out_fb,
 					c_conn->fb_kmap);
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 307c617..db2c515 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -18,13 +18,13 @@
 #include <linux/sort.h>
 #include <linux/clk.h>
 #include <linux/bitmap.h>
+#include <linux/sde_rsc.h>
 
 #include "msm_prop.h"
 
 #include "sde_kms.h"
 #include "sde_trace.h"
 #include "sde_crtc.h"
-#include "sde_rsc.h"
 #include "sde_core_perf.h"
 
 static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 44f6169..ec15215 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -365,9 +365,11 @@
 		if (!sbuf_mode) {
 			cstate->sbuf_cfg.rot_op_mode =
 					SDE_CTL_ROT_OP_MODE_OFFLINE;
+			cstate->sbuf_prefill_line = 0;
 		} else {
 			cstate->sbuf_cfg.rot_op_mode =
 					SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+			cstate->sbuf_prefill_line = prefill;
 		}
 
 		ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
@@ -465,12 +467,6 @@
 			sde_connector_prepare_fence(conn);
 		}
 
-	if (cstate->num_connectors > 0 && cstate->connectors[0]->encoder)
-		cstate->intf_mode = sde_encoder_get_intf_mode(
-				cstate->connectors[0]->encoder);
-	else
-		cstate->intf_mode = INTF_MODE_NONE;
-
 	/* prepare main output fence */
 	sde_fence_prepare(&sde_crtc->output_fence);
 }
@@ -512,6 +508,22 @@
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+
+	if (!crtc || !crtc->dev) {
+		SDE_ERROR("invalid crtc\n");
+		return INTF_MODE_NONE;
+	}
+
+	drm_for_each_encoder(encoder, crtc->dev)
+		if (encoder->crtc == crtc)
+			return sde_encoder_get_intf_mode(encoder);
+
+	return INTF_MODE_NONE;
+}
+
 static void sde_crtc_vblank_cb(void *data)
 {
 	struct drm_crtc *crtc = (struct drm_crtc *)data;
@@ -1024,6 +1036,7 @@
 	struct sde_crtc *sde_crtc;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
+	struct sde_crtc_state *cstate;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -1033,8 +1046,11 @@
 	sde_crtc = to_sde_crtc(crtc);
 	sde_kms = _sde_crtc_get_kms(crtc);
 	priv = sde_kms->dev->dev_private;
+	cstate = to_sde_crtc_state(crtc->state);
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct sde_encoder_kickoff_params params = { 0 };
+
 		if (encoder->crtc != crtc)
 			continue;
 
@@ -1042,7 +1058,8 @@
 		 * Encoder will flush/start now, unless it has a tx pending.
 		 * If so, it may delay and flush at an irq event (e.g. ppdone)
 		 */
-		sde_encoder_prepare_for_kickoff(encoder);
+		params.inline_rotate_prefill = cstate->sbuf_prefill_line;
+		sde_encoder_prepare_for_kickoff(encoder, &params);
 	}
 
 	if (atomic_read(&sde_crtc->frame_pending) > 2) {
@@ -1715,6 +1732,22 @@
 			"core_ib", 0x0, 0, U64_MAX,
 			SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA,
 			CRTC_PROP_CORE_IB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"mem_ab", 0x0, 0, U64_MAX,
+			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
+			CRTC_PROP_MEM_AB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"mem_ib", 0x0, 0, U64_MAX,
+			SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA,
+			CRTC_PROP_MEM_IB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"rot_prefill_bw", 0, 0, U64_MAX,
+			catalog->perf.max_bw_high * 1000ULL,
+			CRTC_PROP_ROT_PREFILL_BW);
+	msm_property_install_range(&sde_crtc->property_info,
+			"rot_clk", 0, 0, U64_MAX,
+			sde_kms->perf.max_core_clk_rate,
+			CRTC_PROP_ROT_CLK);
 
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
 		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
@@ -2010,7 +2043,7 @@
 
 	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
 	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
-	seq_printf(s, "intf_mode: %d\n", cstate->intf_mode);
+	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
 	seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
 	seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
 	seq_printf(s, "max_per_pipe_ib: %llu\n",
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index d288752..0647ff4 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -202,6 +202,7 @@
  * @cur_perf: current performance state
  * @new_perf: new performance state
  * @sbuf_cfg: stream buffer configuration
+ * @sbuf_prefill_line: number of lines for inline rotator prefetch
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
@@ -221,6 +222,7 @@
 	struct sde_core_perf_params cur_perf;
 	struct sde_core_perf_params new_perf;
 	struct sde_ctl_sbuf_cfg sbuf_cfg;
+	u64 sbuf_prefill_line;
 };
 
 #define to_sde_crtc_state(x) \
@@ -314,16 +316,10 @@
 void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 
 /**
- * sde_crtc_get_intf_mode - get interface mode of the given crtc
+ * sde_crtc_get_intf_mode - get primary interface mode of the given crtc
  * @crtc: Pointer to crtc
  */
-static inline enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
-{
-	struct sde_crtc_state *cstate =
-			crtc ? to_sde_crtc_state(crtc->state) : NULL;
-
-	return cstate ? cstate->intf_mode : INTF_MODE_NONE;
-}
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc);
 
 /**
  * sde_crtc_get_client_type - check the crtc type- rt, nrt, rsc, etc.
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 7db44d3..274dc6f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -19,6 +19,7 @@
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/sde_rsc.h>
 
 #include "msm_drv.h"
 #include "sde_kms.h"
@@ -32,7 +33,6 @@
 #include "sde_formats.h"
 #include "sde_encoder_phys.h"
 #include "sde_color_processing.h"
-#include "sde_rsc.h"
 #include "sde_power_handle.h"
 #include "sde_hw_dsc.h"
 
@@ -1203,7 +1203,8 @@
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
-void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
@@ -1224,7 +1225,7 @@
 		phys = sde_enc->phys_encs[i];
 		if (phys) {
 			if (phys->ops.prepare_for_kickoff)
-				phys->ops.prepare_for_kickoff(phys);
+				phys->ops.prepare_for_kickoff(phys, params);
 			if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET)
 				needs_hw_reset = true;
 		}
@@ -1856,7 +1857,7 @@
 	sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name,
 					disp_info->is_primary);
 	if (IS_ERR_OR_NULL(sde_enc->rsc_client)) {
-		SDE_ERROR("sde rsc client create failed :%ld\n",
+		SDE_DEBUG("sde rsc client create failed :%ld\n",
 						PTR_ERR(sde_enc->rsc_client));
 		sde_enc->rsc_client = NULL;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index bd7ef69..cdecd08 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -45,6 +45,14 @@
 };
 
 /**
+ * sde_encoder_kickoff_params - info encoder requires at kickoff
+ * @inline_rotate_prefill: number of lines to prefill for inline rotation
+ */
+struct sde_encoder_kickoff_params {
+	u32 inline_rotate_prefill;
+};
+
+/**
  * sde_encoder_get_hw_resources - Populate table of required hardware resources
  * @encoder:	encoder pointer
  * @hw_res:	resource table to populate with encoder required resources
@@ -89,8 +97,10 @@
  *	Immediately: if no previous commit is outstanding.
  *	Delayed: Block until next trigger can be issued.
  * @encoder:	encoder pointer
+ * @params:	kickoff time parameters
  */
-void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+		struct sde_encoder_kickoff_params *params);
 
 /**
  * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
@@ -116,6 +126,24 @@
 enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
 
 /**
+ * enum sde_encoder_property - property tags for sde encoder
+ * @SDE_ENCODER_PROPERTY_INLINE_ROTATE_PREFILL: # of prefill lines, 0 to disable
+ */
+enum sde_encoder_property {
+	SDE_ENCODER_PROPERTY_INLINE_ROTATE_PREFILL,
+	SDE_ENCODER_PROPERTY_MAX,
+};
+
+/*
+ * sde_encoder_set_property - set the property tag to the given value
+ * @encoder: Pointer to drm encoder object
+ * @tag: property tag
+ * @val: property value
+ * return: 0 if success; error code otherwise
+ */
+int sde_encoder_set_property(struct drm_encoder *encoder, u32 tag, u64 val);
+
+/**
  * sde_encoder_init - initialize virtual encoder object
  * @dev:        Pointer to drm device structure
  * @disp_info:  Pointer to display information structure
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 6d50c53..b9e802f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -16,6 +16,7 @@
 #define __SDE_ENCODER_PHYS_H__
 
 #include <linux/jiffies.h>
+#include <linux/sde_rsc.h>
 
 #include "sde_kms.h"
 #include "sde_hw_intf.h"
@@ -27,8 +28,6 @@
 #include "sde_encoder.h"
 #include "sde_connector.h"
 
-#include "sde_rsc.h"
-
 #define SDE_ENCODER_NAME_MAX	16
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
@@ -141,7 +140,8 @@
 			struct drm_connector_state *conn_state);
 	int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
 	int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
-	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc);
+	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
+			struct sde_encoder_kickoff_params *params);
 	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
 	void (*trigger_start)(struct sde_encoder_phys *phys_enc);
 	bool (*needs_single_flush)(struct sde_encoder_phys *phys_enc);
@@ -238,12 +238,16 @@
  * @irq_idx:	IRQ interface lookup index
  * @irq_cb:	interrupt callback
  * @hw_intf:	Hardware interface to the intf registers
+ * @timing_params: Current timing parameters
+ * @rot_prefill_line: number of lines to prefill for inline rotation; 0 disables
  */
 struct sde_encoder_phys_vid {
 	struct sde_encoder_phys base;
 	int irq_idx[INTR_IDX_MAX];
 	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
 	struct sde_hw_intf *hw_intf;
+	struct intf_timing_params timing_params;
+	u64 rot_prefill_line;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index afc21ed..86e292f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -653,7 +653,8 @@
 }
 
 static void sde_encoder_phys_cmd_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc)
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
@@ -687,7 +688,7 @@
 			to_sde_encoder_phys_cmd(phys_enc);
 
 	if (cmd_enc->serialize_wait4pp)
-		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
+		sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
 
 	/*
 	 * following statement is true serialize_wait4pp is false.
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 01dd982..82d32dc 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -211,6 +211,54 @@
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 }
 
+/*
+ * programmable_rot_fetch_config: Programs ROT to prefetch lines by offsetting
+ *	the start of fetch into the vertical front porch for cases where the
+ *	vsync pulse width and vertical back porch time are insufficient
+ *
+ *	Gets # of lines to pre-fetch, then calculates the VSYNC counter value.
+ *	HW needs the VSYNC counter of the first pixel of the target VFP line.
+ * @phys_enc: Pointer to physical encoder
+ * @rot_fetch_lines: number of lines to prefill, or 0 to disable
+ */
+static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
+		u64 rot_fetch_lines)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+		to_sde_encoder_phys_vid(phys_enc);
+	struct intf_prog_fetch f = { 0 };
+	struct intf_timing_params *timing = &vid_enc->timing_params;
+	u32 vfp_fetch_lines = 0;
+	u32 horiz_total = 0;
+	u32 vert_total = 0;
+	u32 rot_fetch_start_vsync_counter = 0;
+	unsigned long lock_flags;
+
+	if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_rot_start))
+		return;
+
+	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+	if (vfp_fetch_lines && rot_fetch_lines) {
+		vert_total = get_vertical_total(timing);
+		horiz_total = get_horizontal_total(timing);
+		if (vert_total >= (vfp_fetch_lines + rot_fetch_lines)) {
+			rot_fetch_start_vsync_counter =
+			    (vert_total - vfp_fetch_lines - rot_fetch_lines) *
+			    horiz_total + 1;
+			f.enable = 1;
+			f.fetch_start = rot_fetch_start_vsync_counter;
+		}
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc,
+		"rot_fetch_lines %llu rot_fetch_start_vsync_counter %u\n",
+		rot_fetch_lines, rot_fetch_start_vsync_counter);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
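As a worked example of the counter math above (illustrative numbers, not taken from any real panel timing): with vert_total = 2000 lines, horiz_total = 1100 pixel clocks, vfp_fetch_lines = 4 and rot_fetch_lines = 96, the rotator fetch trigger is programmed to (2000 - 4 - 96) * 1100 + 1 = 2090001, i.e. the first pixel of the target VFP line; if the two prefetch windows do not fit inside vert_total, the trigger is left disabled.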
+
 static bool sde_encoder_phys_vid_mode_fixup(
 		struct sde_encoder_phys *phys_enc,
 		const struct drm_display_mode *mode,
@@ -281,6 +329,8 @@
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
 	programmable_fetch_config(phys_enc, &timing_params);
+
+	vid_enc->timing_params = timing_params;
 }
 
 static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
@@ -655,14 +705,15 @@
 }
 
 static void sde_encoder_phys_vid_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc)
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_phys_vid *vid_enc;
 	struct sde_hw_ctl *ctl;
 	int rc;
 
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
+	if (!phys_enc || !params) {
+		SDE_ERROR("invalid encoder/parameters\n");
 		return;
 	}
 	vid_enc = to_sde_encoder_phys_vid(phys_enc);
@@ -681,6 +732,8 @@
 				ctl->idx, rc);
 		SDE_DBG_DUMP("panic");
 	}
+
+	programmable_rot_fetch_config(phys_enc, params->inline_rotate_prefill);
 }
 
 static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 5187627..28a2b16 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -777,9 +777,11 @@
 /**
  * sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
  * @phys_enc:	Pointer to physical encoder
+ * @params:	kickoff parameters
  */
 static void sde_encoder_phys_wb_prepare_for_kickoff(
-		struct sde_encoder_phys *phys_enc)
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	int ret;
@@ -992,7 +994,7 @@
 		goto exit;
 
 	phys_enc->enable_state = SDE_ENC_DISABLING;
-	sde_encoder_phys_wb_prepare_for_kickoff(phys_enc);
+	sde_encoder_phys_wb_prepare_for_kickoff(phys_enc, NULL);
 	if (phys_enc->hw_ctl->ops.trigger_flush)
 		phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
 	sde_encoder_helper_trigger_start(phys_enc);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 2adb4cb..00b6c85 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -779,6 +779,24 @@
 	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
 }
 
+int sde_format_get_block_size(const struct sde_format *fmt,
+		uint32_t *w, uint32_t *h)
+{
+	if (!fmt || !w || !h) {
+		DRM_ERROR("invalid pointer\n");
+		return -EINVAL;
+	}
+
+	/* TP10 is 96x96 and all others are 128x128 */
+	if (SDE_FORMAT_IS_YUV(fmt) && SDE_FORMAT_IS_DX(fmt) &&
+			(fmt->num_planes == 2) && fmt->unpack_tight)
+		*w = *h = 96;
+	else
+		*w = *h = 128;
+
+	return 0;
+}
+
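A brief usage sketch (hypothetical caller, assuming sde_formats.h is included and fmt was resolved earlier through the existing format lookup helpers):

	uint32_t blk_w = 0, blk_h = 0;

	/* returns -EINVAL on NULL arguments; 96x96 for TP10, 128x128 otherwise */
	if (!sde_format_get_block_size(fmt, &blk_w, &blk_h))
		SDE_DEBUG("block fetch unit %ux%u\n", blk_w, blk_h);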
 uint32_t sde_format_get_framebuffer_size(
 		const uint32_t format,
 		const uint32_t width,
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 544f436..40aab22 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -73,6 +73,18 @@
 		struct sde_hw_fmt_layout *layout);
 
 /**
+ * sde_format_get_block_size - get block size of given format when
+ *	operating in block mode
+ * @fmt:             pointer to sde_format
+ * @w:               pointer to width of the block
+ * @h:               pointer to height of the block
+ *
+ * Return: 0 if success; error code otherwise
+ */
+int sde_format_get_block_size(const struct sde_format *fmt,
+		uint32_t *w, uint32_t *h);
+
+/**
  * sde_format_check_modified_format - validate format and buffers for
  *                   sde non-standard, i.e. modified format
  * @kms:             kms driver
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 25222c3..9285487 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1378,6 +1378,9 @@
 			intf->controller_id = none_count;
 			none_count++;
 		}
+
+		if (sde_cfg->has_sbuf)
+			set_bit(SDE_INTF_ROT_START, &intf->features);
 	}
 
 end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 962cd0d..97da08f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -212,6 +212,16 @@
 };
 
 /**
+ * INTF sub-blocks
+ * @SDE_INTF_ROT_START          INTF supports rotator start trigger
+ * @SDE_INTF_MAX
+ */
+enum {
+	SDE_INTF_ROT_START = 0x1,
+	SDE_INTF_MAX
+};
+
+/**
  * WB sub-blocks and features
  * @SDE_WB_LINE_MODE        Writeback module supports line/linear mode
  * @SDE_WB_BLOCK_MODE       Writeback module supports block mode read
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index c17844d..d96e49a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -58,6 +58,7 @@
 #define   INTF_TPG_BLK_WHITE_PATTERN_FRAMES   0x118
 #define   INTF_TPG_RGB_MAPPING          0x11C
 #define   INTF_PROG_FETCH_START         0x170
+#define   INTF_PROG_ROT_START           0x174
 
 #define   INTF_FRAME_LINE_COUNT_EN      0x0A8
 #define   INTF_FRAME_COUNT              0x0AC
@@ -234,6 +235,25 @@
 	SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable);
 }
 
+static void sde_hw_intf_setup_rot_start(
+		struct sde_hw_intf *intf,
+		const struct intf_prog_fetch *fetch)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	int fetch_enable;
+
+	fetch_enable = SDE_REG_READ(c, INTF_CONFIG);
+	if (fetch->enable) {
+		fetch_enable |= BIT(19);
+		SDE_REG_WRITE(c, INTF_PROG_ROT_START,
+				fetch->fetch_start);
+	} else {
+		fetch_enable &= ~BIT(19);
+	}
+
+	SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
 static void sde_hw_intf_get_status(
 		struct sde_hw_intf *intf,
 		struct intf_status *s)
@@ -303,6 +323,8 @@
 	ops->enable_timing = sde_hw_intf_enable_timing_engine;
 	ops->setup_misr = sde_hw_intf_set_misr;
 	ops->collect_misr = sde_hw_intf_collect_misr;
+	if (cap & BIT(SDE_INTF_ROT_START))
+		ops->setup_rot_start = sde_hw_intf_setup_rot_start;
 }
 
 struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
index f4a01cb..c6428ca 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -74,6 +74,7 @@
  *  Assumption is these functions will be called after clocks are enabled
  * @ setup_timing_gen : programs the timing engine
  * @ setup_prog_fetch : enables/disables the programmable fetch logic
+ * @ setup_rot_start  : enables/disables the rotator start trigger
  * @ enable_timing: enable/disable timing engine
  * @ get_status: returns if timing engine is enabled or not
  * @ setup_misr: enables/disables MISR in HW register
@@ -87,6 +88,9 @@
 	void (*setup_prg_fetch)(struct sde_hw_intf *intf,
 			const struct intf_prog_fetch *fetch);
 
+	void (*setup_rot_start)(struct sde_hw_intf *intf,
+			const struct intf_prog_fetch *fetch);
+
 	void (*enable_timing)(struct sde_hw_intf *intf,
 			u8 enable);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index 049d877..f79dc08 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -180,10 +180,16 @@
 		break;
 	case DRM_FORMAT_NV12:
 		if (SDE_MODIFIER_IS_UBWC(drm_modifier)) {
-			if (SDE_MODIFIER_IS_10B(drm_modifier))
-				*pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC;
-			else
+			if (SDE_MODIFIER_IS_10B(drm_modifier)) {
+				if (SDE_MODIFIER_IS_TIGHT(drm_modifier))
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC;
+				else
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC;
+			} else {
 				*pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_UBWC;
+			}
 		} else if (SDE_MODIFIER_IS_TILE(drm_modifier)) {
 			if (SDE_MODIFIER_IS_10B(drm_modifier)) {
 				if (SDE_MODIFIER_IS_TIGHT(drm_modifier))
@@ -452,6 +458,12 @@
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE |
 				DRM_FORMAT_MOD_QCOM_DX;
 		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE |
+				DRM_FORMAT_MOD_QCOM_DX;
+		break;
 	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
 		*drm_pixfmt = DRM_FORMAT_NV12;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE |
@@ -557,6 +569,8 @@
 	rot_cmd.vflip = data->vflip;
 	rot_cmd.secure = data->secure;
 	rot_cmd.clkrate = data->clkrate;
+	rot_cmd.data_bw = 0;
+	rot_cmd.prefill_bw = data->prefill_bw;
 	rot_cmd.src_width = data->src_width;
 	rot_cmd.src_height = data->src_height;
 	rot_cmd.src_rect_x = data->src_rect_x;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index 37215bb..949f9bd 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -49,6 +49,7 @@
  * @secure: true if image content is in secure domain
  * @video_mode: true if rotator is feeding into video interface
  * @clkrate : clock rate in Hz
+ * @prefill_bw: prefill bandwidth in Bps (video mode only)
  * @src_iova: source i/o virtual address
  * @src_len: source i/o buffer length
  * @src_planes: source plane number
@@ -84,6 +85,7 @@
 	bool secure;
 	bool video_mode;
 	u64 clkrate;
+	u64 prefill_bw;
 	dma_addr_t src_iova[4];
 	u32 src_len[4];
 	u32 src_planes;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 3add5e5..ef2c80e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -929,13 +929,9 @@
 		fbo->dma_buf = NULL;
 	}
 
-	for (i = 0; i < fbo->layout.num_planes; i++) {
-		if (fbo->bo[i]) {
-			mutex_lock(&dev->struct_mutex);
-			drm_gem_object_unreference(fbo->bo[i]);
-			mutex_unlock(&dev->struct_mutex);
-			fbo->bo[i] = NULL;
-		}
+	if (sde_kms->iclient && fbo->ihandle) {
+		ion_free(sde_kms->iclient, fbo->ihandle);
+		fbo->ihandle = NULL;
 	}
 }
 
@@ -994,17 +990,52 @@
 	}
 
 	/* allocate backing buffer object */
-	mutex_lock(&dev->struct_mutex);
-	fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
-			MSM_BO_SCANOUT | MSM_BO_WC);
-	if (IS_ERR(fbo->bo[0])) {
+	if (sde_kms->iclient) {
+		u32 heap_id = fbo->flags & DRM_MODE_FB_SECURE ?
+				ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID) :
+				ION_HEAP(ION_SYSTEM_HEAP_ID);
+
+		fbo->ihandle = ion_alloc(sde_kms->iclient,
+				fbo->layout.total_size, SZ_4K, heap_id, 0);
+		if (IS_ERR_OR_NULL(fbo->ihandle)) {
+			SDE_ERROR("failed to alloc ion memory\n");
+			ret = PTR_ERR(fbo->ihandle);
+			fbo->ihandle = NULL;
+			goto done;
+		}
+
+		fbo->dma_buf = ion_share_dma_buf(sde_kms->iclient,
+				fbo->ihandle);
+		if (IS_ERR(fbo->dma_buf)) {
+			SDE_ERROR("failed to share ion memory\n");
+			ret = -ENOMEM;
+			fbo->dma_buf = NULL;
+			goto done;
+		}
+
+		fbo->bo[0] = dev->driver->gem_prime_import(dev,
+				fbo->dma_buf);
+		if (IS_ERR(fbo->bo[0])) {
+			SDE_ERROR("failed to import ion memory\n");
+			ret = PTR_ERR(fbo->bo[0]);
+			fbo->bo[0] = NULL;
+			goto done;
+		}
+	} else {
+		mutex_lock(&dev->struct_mutex);
+		fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
+				MSM_BO_SCANOUT | MSM_BO_WC);
+		if (IS_ERR(fbo->bo[0])) {
+			mutex_unlock(&dev->struct_mutex);
+			SDE_ERROR("failed to new gem buffer\n");
+			ret = PTR_ERR(fbo->bo[0]);
+			fbo->bo[0] = NULL;
+			goto done;
+		}
 		mutex_unlock(&dev->struct_mutex);
-		SDE_ERROR("failed to new gem buffer\n");
-		ret = PTR_ERR(fbo->bo[0]);
-		fbo->bo[0] = NULL;
-		goto done;
 	}
 
+	mutex_lock(&dev->struct_mutex);
 	for (i = 1; i < fbo->layout.num_planes; i++) {
 		fbo->bo[i] = fbo->bo[0];
 		drm_gem_object_reference(fbo->bo[i]);
@@ -1110,6 +1141,11 @@
 	_sde_debugfs_destroy(sde_kms);
 	_sde_kms_mmu_destroy(sde_kms);
 
+	if (sde_kms->iclient) {
+		ion_client_destroy(sde_kms->iclient);
+		sde_kms->iclient = NULL;
+	}
+
 	if (sde_kms->catalog) {
 		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
 			u32 vbif_idx = sde_kms->catalog->vbif[i].id;
@@ -1462,6 +1498,13 @@
 		}
 	}
 
+	sde_kms->iclient = msm_ion_client_create(dev->unique);
+	if (IS_ERR(sde_kms->iclient)) {
+		rc = PTR_ERR(sde_kms->iclient);
+		SDE_DEBUG("msm_ion_client not available: %d\n", rc);
+		sde_kms->iclient = NULL;
+	}
+
 	/*
 	 * Now we need to read the HW catalog and initialize resources such as
 	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 83edde3..ebc277e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -19,6 +19,8 @@
 #ifndef __SDE_KMS_H__
 #define __SDE_KMS_H__
 
+#include <linux/msm_ion.h>
+
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
@@ -140,6 +142,7 @@
 	int nplane;
 	const struct sde_format *fmt;
 	struct sde_hw_fmt_layout layout;
+	struct ion_handle *ihandle;
 	struct dma_buf *dma_buf;
 	struct drm_gem_object *bo[4];
 	struct list_head fb_list;
@@ -155,6 +158,8 @@
 	int mmu_id[MSM_SMMU_DOMAIN_MAX];
 	struct sde_power_client *core_client;
 
+	struct ion_client *iclient;
+
 	/* directory entry for debugfs */
 	struct dentry *debugfs_danger;
 	struct dentry *debugfs_vbif;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index fc0bce6..0be17e4 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -38,6 +38,14 @@
 #include "sde_color_processing.h"
 #include "sde_hw_rot.h"
 
+static bool suspend_blank = true;
+module_param(suspend_blank, bool, 0400);
+MODULE_PARM_DESC(suspend_blank,
+		"If set, active planes will force their outputs to black,\n"
+		"by temporarily enabling the color fill, when recovering\n"
+		"from a system resume instead of attempting to display the\n"
+		"last provided frame buffer.");
+
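Because suspend_blank is registered with mode 0400 it is read-only through sysfs, so it can only be changed at load time, e.g. msm.suspend_blank=0 on the kernel command line for a built-in driver or modprobe msm suspend_blank=0 when built as a module (the msm module name is an assumption here, not something this patch states).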
 #define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
 		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
 
@@ -165,6 +173,27 @@
 	return to_sde_kms(priv->kms);
 }
 
+/**
+ * _sde_plane_get_crtc_state - obtain crtc state attached to given plane state
+ * @pstate: Pointer to drm plane state
+ * return: Pointer to crtc state on success; NULL or error pointer otherwise
+ */
+static struct drm_crtc_state *_sde_plane_get_crtc_state(
+		struct drm_plane_state *pstate)
+{
+	struct drm_crtc_state *cstate;
+
+	if (!pstate || !pstate->crtc)
+		return NULL;
+
+	if (pstate->state)
+		cstate = drm_atomic_get_crtc_state(pstate->state, pstate->crtc);
+	else
+		cstate = pstate->crtc->state;
+
+	return cstate;
+}
+
 static bool sde_plane_enabled(struct drm_plane_state *state)
 {
 	return state && state->fb && state->crtc;
@@ -740,8 +769,6 @@
 				psde->pipe_cfg.horz_decimation);
 		scale_cfg->src_height[i] = DECIMATED_DIMENSION(src_h,
 				psde->pipe_cfg.vert_decimation);
-		if (SDE_FORMAT_IS_YUV(fmt))
-			scale_cfg->src_width[i] &= ~0x1;
 		if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2) {
 			scale_cfg->src_width[i] /= chroma_subsmpl_h;
 			scale_cfg->src_height[i] /= chroma_subsmpl_v;
@@ -1172,6 +1199,7 @@
 		psde->pipe_cfg.src_rect.y = 0;
 		psde->pipe_cfg.src_rect.w = psde->pipe_cfg.dst_rect.w;
 		psde->pipe_cfg.src_rect.h = psde->pipe_cfg.dst_rect.h;
+		_sde_plane_setup_scaler(psde, fmt, 0);
 
 		if (psde->pipe_hw->ops.setup_format)
 			psde->pipe_hw->ops.setup_format(psde->pipe_hw,
@@ -1183,7 +1211,6 @@
 					&psde->pipe_cfg,
 					pstate->multirect_index);
 
-		_sde_plane_setup_scaler(psde, fmt, 0);
 		if (psde->pipe_hw->ops.setup_pe)
 			psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
 					&psde->pixel_ext);
@@ -1193,7 +1220,7 @@
 }
 
 /**
- * sde_plane_rot_calc_perfill - calculate rotator start prefill
+ * sde_plane_rot_calc_prefill - calculate rotator start prefill
  * @plane: Pointer to drm plane
  * return: prefill time in line
  */
@@ -1225,8 +1252,8 @@
 		return 0;
 	}
 
-	/* if rstate->out_fb_format is TP10, then block size is 96 */
-
+	sde_format_get_block_size(rstate->out_fb_format, &blocksize,
+			&blocksize);
 	prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
 
 	SDE_DEBUG("plane%d prefill:%u\n", plane->base.id, prefill_line);
@@ -1442,6 +1469,8 @@
 	struct sde_plane_state *pstate = to_sde_plane_state(state);
 	struct sde_plane_rot_state *rstate = &pstate->rot;
 	struct sde_hw_rot_cmd *rot_cmd;
+	struct drm_crtc_state *cstate;
+	struct sde_crtc_state *sde_cstate;
 	int ret, i;
 
 	if (!plane || !state || !state->fb || !rstate->rot_hw) {
@@ -1449,6 +1478,13 @@
 		return -EINVAL;
 	}
 
+	cstate = _sde_plane_get_crtc_state(state);
+	if (IS_ERR_OR_NULL(cstate)) {
+		SDE_ERROR("invalid crtc state %ld\n", PTR_ERR(cstate));
+		return -EINVAL;
+	}
+	sde_cstate = to_sde_crtc_state(cstate);
+
 	rot_cmd = &rstate->rot_cmd;
 
 	rot_cmd->master = (rstate->out_xpos == 0);
@@ -1460,6 +1496,10 @@
 	rot_cmd->hflip = rstate->hflip;
 	rot_cmd->vflip = rstate->vflip;
 	rot_cmd->secure = state->fb->flags & DRM_MODE_FB_SECURE ? true : false;
+	rot_cmd->prefill_bw = sde_crtc_get_property(sde_cstate,
+			CRTC_PROP_ROT_PREFILL_BW);
+	rot_cmd->clkrate = sde_crtc_get_property(sde_cstate,
+			CRTC_PROP_ROT_CLK);
 	rot_cmd->dst_writeback = psde->sbuf_writeback;
 
 	if (sde_crtc_get_intf_mode(state->crtc) == INTF_MODE_VIDEO)
@@ -2254,6 +2294,8 @@
 		psde->pipe_cfg.src_rect = src;
 		psde->pipe_cfg.dst_rect = dst;
 
+		_sde_plane_setup_scaler(psde, fmt, pstate);
+
 		/* check for color fill */
 		psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
 				PLANE_PROP_COLOR_FILL);
@@ -2266,7 +2308,6 @@
 					pstate->multirect_index);
 		}
 
-		_sde_plane_setup_scaler(psde, fmt, pstate);
 		if (psde->pipe_hw->ops.setup_pe)
 			psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
 					&psde->pixel_ext);
@@ -2863,6 +2904,10 @@
 	else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
 		psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
 
+	/* force black color fill during suspend */
+	if (msm_is_suspend_state(plane->dev) && suspend_blank)
+		_sde_plane_color_fill(psde, 0x0, 0x0);
+
 	/* flag h/w flush complete */
 	if (plane->state)
 		to_sde_plane_state(plane->state)->pending = false;
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 4c172a4..4e262a3 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -21,6 +21,8 @@
 #define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	64000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
+#include <linux/sde_io_util.h>
+
 /**
  * mdss_bus_vote_type: register bus vote type
  * VOTE_INDEX_DISABLE: removes the client vote
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 2464551..a9a7d4f 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -27,7 +27,7 @@
 #include <soc/qcom/rpmh.h>
 #include <drm/drmP.h>
 #include <drm/drm_irq.h>
-#include "sde_rsc.h"
+#include "sde_rsc_priv.h"
 
 /* this time is ~0.02ms */
 #define RSC_BACKOFF_TIME_NS		 20000
@@ -105,6 +105,7 @@
 
 	return client;
 }
+EXPORT_SYMBOL(sde_rsc_client_create);
 
 /**
  * sde_rsc_client_destroy() - Destroy the sde rsc client.
@@ -141,6 +142,7 @@
 end:
 	return;
 }
+EXPORT_SYMBOL(sde_rsc_client_destroy);
 
 struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
 		void (*cb_func)(uint32_t event_type, void *usr), void *usr)
@@ -178,6 +180,7 @@
 
 	return evt;
 }
+EXPORT_SYMBOL(sde_rsc_register_event);
 
 void sde_rsc_unregister_event(struct sde_rsc_event *event)
 {
@@ -204,6 +207,7 @@
 end:
 	return;
 }
+EXPORT_SYMBOL(sde_rsc_unregister_event);
 
 static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
 	struct sde_power_client *pclient, bool enable)
@@ -583,6 +587,7 @@
 	mutex_unlock(&rsc->client_lock);
 	return rc;
 }
+EXPORT_SYMBOL(sde_rsc_client_state_update);
 
 /**
  * sde_rsc_client_vote() - ab/ib vote from rsc client
@@ -661,6 +666,7 @@
 	mutex_unlock(&rsc->client_lock);
 	return rc;
 }
+EXPORT_SYMBOL(sde_rsc_client_vote);
 
 static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 {
diff --git a/drivers/gpu/drm/msm/sde_rsc.h b/drivers/gpu/drm/msm/sde_rsc.h
deleted file mode 100644
index 2775d21..0000000
--- a/drivers/gpu/drm/msm/sde_rsc.h
+++ /dev/null
@@ -1,368 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _SDE_RSC_H_
-#define _SDE_RSC_H_
-
-#include <linux/kernel.h>
-#include <linux/sde_io_util.h>
-
-#include <soc/qcom/tcs.h>
-#include "sde_power_handle.h"
-
-#define SDE_RSC_COMPATIBLE "disp_rscc"
-
-#define MAX_RSC_CLIENT_NAME_LEN 128
-
-/* primary display rsc index */
-#define SDE_RSC_INDEX		0
-
-/* rsc index max count */
-#define MAX_RSC_COUNT		5
-
-struct sde_rsc_priv;
-
-/**
- * event will be triggered before sde core power collapse,
- * mdss gdsc is still on
- */
-#define SDE_RSC_EVENT_PRE_CORE_PC 0x1
-/**
- * event will be triggered after sde core collapse complete,
- * mdss gdsc is off now
- */
-#define SDE_RSC_EVENT_POST_CORE_PC 0x2
-/**
- * event will be triggered before restoring the sde core from power collapse,
- * mdss gdsc is still off
- */
-#define SDE_RSC_EVENT_PRE_CORE_RESTORE 0x4
-/**
- * event will be triggered after restoring the sde core from power collapse,
- * mdss gdsc is on now
- */
-#define SDE_RSC_EVENT_POST_CORE_RESTORE 0x8
-/**
- * event attached with solver state enabled
- * all clients in clk_state or cmd_state
- */
-#define SDE_RSC_EVENT_SOLVER_ENABLED 0x10
-/**
- * event attached with solver state disabled
- * one of the client requested for vid state
- */
-#define SDE_RSC_EVENT_SOLVER_DISABLED 0x20
-
-/**
- * rsc_mode_req: sde rsc mode request information
- * MODE_READ: read vsync status
- * MODE0_UPDATE: mode0 status , this should be 0x0
- * MODE1_UPDATE: mode1 status , this should be 0x1
- * MODE2_UPDATE: mode2 status , this should be 0x2
- */
-enum rsc_mode_req {
-	MODE_READ,
-	MODE0_UPDATE = 0x1,
-	MODE1_UPDATE = 0x2,
-	MODE2_UPDATE = 0x3,
-};
-
-/**
- * rsc_vsync_req: sde rsc vsync request information
- * VSYNC_READ: read vsync status
- * VSYNC_ENABLE: enable rsc wrapper vsync status
- * VSYNC_DISABLE: disable rsc wrapper vsync status
- */
-enum rsc_vsync_req {
-	VSYNC_READ,
-	VSYNC_ENABLE,
-	VSYNC_DISABLE,
-};
-
-/**
- * sde_rsc_state: sde rsc state information
- * SDE_RSC_IDLE_STATE: A client requests for idle state when there is no
- *                    pixel or cmd transfer expected. An idle vote from
- *                    all clients lead to power collapse state.
- * SDE_RSC_CLK_STATE:  A client requests for clk state when it wants to
- *                    only avoid mode-2 entry/exit. For ex: V4L2 driver,
- *                    sde power handle, etc.
- * SDE_RSC_CMD_STATE:  A client requests for cmd state when it wants to
- *                    enable the solver mode.
- * SDE_RSC_VID_STATE:  A client requests for vid state it wants to avoid
- *                    solver enable because client is fetching data from
- *                    continuously.
- */
-enum sde_rsc_state {
-	SDE_RSC_IDLE_STATE,
-	SDE_RSC_CLK_STATE,
-	SDE_RSC_CMD_STATE,
-	SDE_RSC_VID_STATE,
-};
-
-/**
- * struct sde_rsc_client: stores the rsc client for sde driver
- * @name:	name of the client
- * @current_state:   current client state
- * @crtc_id:		crtc_id associated with this rsc client.
- * @rsc_index:	rsc index of a client - only index "0" valid.
- * @list:	list to attach client master list
- */
-struct sde_rsc_client {
-	char name[MAX_RSC_CLIENT_NAME_LEN];
-	short current_state;
-	int crtc_id;
-	u32 rsc_index;
-	struct list_head list;
-};
-
-/**
- * struct sde_rsc_event: local event registration entry structure
- * @cb_func:	Pointer to desired callback function
- * @usr:	User pointer to pass to callback on event trigger
- * @rsc_index:	rsc index of a client - only index "0" valid.
- * @event_type:	refer comments in event_register
- * @list:	list to attach event master list
- */
-struct sde_rsc_event {
-	void (*cb_func)(uint32_t event_type, void *usr);
-	void *usr;
-	u32 rsc_index;
-	uint32_t event_type;
-	struct list_head list;
-};
-
-/**
- * struct sde_rsc_hw_ops - sde resource state coordinator hardware ops
- * @init:			Initialize the sequencer, solver, qtimer,
-				etc. hardware blocks on RSC.
- * @tcs_wait:			Waits for TCS block OK to allow sending a
- *				TCS command.
- * @hw_vsync:			Enables the vsync on RSC block.
- * @tcs_use_ok:			set TCS set to high to allow RSC to use it.
- * @mode2_entry:		Request to entry mode2 when all clients are
- *                              requesting power collapse.
- * @mode2_exit:			Request to exit mode2 when one of the client
- *                              is requesting against the power collapse
- * @is_amc_mode:		Check current amc mode status
- * @state_update:		Enable/override the solver based on rsc state
- *                              status (command/video)
- * @mode_show:			shows current mode status, mode0/1/2
- * @debug_show:			Show current debug status.
- */
-
-struct sde_rsc_hw_ops {
-	int (*init)(struct sde_rsc_priv *rsc);
-	int (*tcs_wait)(struct sde_rsc_priv *rsc);
-	int (*hw_vsync)(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
-		char *buffer, int buffer_size, u32 mode);
-	int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
-	int (*mode2_entry)(struct sde_rsc_priv *rsc);
-	int (*mode2_exit)(struct sde_rsc_priv *rsc);
-	bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
-	int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
-	int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
-	int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
-		char *buffer, int buffer_size, bool mode);
-};
-
-/**
- * struct sde_rsc_cmd_config: provides panel configuration to rsc
- * when client is command mode. It is not required to set it during
- * video mode.
- *
- * @fps:	panel te interval
- * @vtotal:	current vertical total (height + vbp + vfp)
- * @jitter:	panel can set the jitter to wake up rsc/solver early
- *              This value causes mdp core to exit certain mode
- *              early. Default is 10% jitter
- * @prefill_lines:	max prefill lines based on panel
- */
-struct sde_rsc_cmd_config {
-	u32 fps;
-	u32 vtotal;
-	u32 jitter;
-	u32 prefill_lines;
-};
-
-/**
- * struct sde_rsc_timer_config: this is internal configuration between
- * rsc and rsc_hw API.
- *
- * @static_wakeup_time_ns:	wrapper backoff time in nano seconds
- * @rsc_backoff_time_ns:	rsc backoff time in nano seconds
- * @pdc_backoff_time_ns:	pdc backoff time in nano seconds
- * @rsc_mode_threshold_time_ns:	rsc mode threshold time in nano seconds
- * @rsc_time_slot_0_ns:		mode-0 time slot threshold in nano seconds
- * @rsc_time_slot_1_ns:		mode-1 time slot threshold in nano seconds
- * @rsc_time_slot_2_ns:		mode-2 time slot threshold in nano seconds
- */
-struct sde_rsc_timer_config {
-	u32 static_wakeup_time_ns;
-
-	u32 rsc_backoff_time_ns;
-	u32 pdc_backoff_time_ns;
-	u32 rsc_mode_threshold_time_ns;
-	u32 rsc_time_slot_0_ns;
-	u32 rsc_time_slot_1_ns;
-	u32 rsc_time_slot_2_ns;
-};
-
-/**
- * struct sde_rsc_priv: sde resource state coordinator(rsc) private handle
- * @version:		rsc sequence version
- * @phandle:		module power handle for clocks
- * @pclient:		module power client of phandle
- * @fs:			"MDSS GDSC" handle
- *
- * @drv_io:		sde drv io data mapping
- * @wrapper_io:		wrapper io data mapping
- *
- * @client_list:	current rsc client list handle
- * @event_list:		current rsc event list handle
- * @client_lock:	current rsc client synchronization lock
- *
- * timer_config:	current rsc timer configuration
- * cmd_config:		current panel config
- * current_state:	current rsc state (video/command), solver
- *                      override/enabled.
- * debug_mode:		enables the logging for each register read/write
- * debugfs_root:	debugfs file system root node
- *
- * hw_ops:		sde rsc hardware operations
- * power_collapse:	if all clients are in IDLE state then it enters in
- *			mode2 state and enable the power collapse state
- * power_collapse_block:By default, rsc move to mode-2 if all clients are in
- *			invalid state. It can be blocked by this boolean entry.
- * primary_client:	A client which is allowed to make command state request
- *			and ab/ib vote on display rsc
- * master_drm:		Primary client waits for vsync on this drm object based
- *			on crtc id
- */
-struct sde_rsc_priv {
-	u32 version;
-	struct sde_power_handle phandle;
-	struct sde_power_client *pclient;
-	struct regulator *fs;
-
-	struct dss_io_data drv_io;
-	struct dss_io_data wrapper_io;
-
-	struct list_head client_list;
-	struct list_head event_list;
-	struct mutex client_lock;
-
-	struct sde_rsc_timer_config timer_config;
-	struct sde_rsc_cmd_config cmd_config;
-	u32	current_state;
-
-	u32 debug_mode;
-	struct dentry *debugfs_root;
-
-	struct sde_rsc_hw_ops hw_ops;
-	bool power_collapse;
-	bool power_collapse_block;
-	struct sde_rsc_client *primary_client;
-
-	struct drm_device *master_drm;
-};
-
-/**
- * sde_rsc_client_create() - create the client for sde rsc.
- * Different displays like DSI, HDMI, DP, WB, etc should call this
- * api to register their vote for rpmh. They still need to vote for
- * power handle to get the clocks.
-
- * @rsc_index:   A client will be created on this RSC. As of now only
- *               SDE_RSC_INDEX is valid rsc index.
- * @name:	 Caller needs to provide some valid string to identify
- *               the client. "primary", "dp", "hdmi" are suggested name.
- * @is_primary:	 Caller needs to provide information if client is primary
- *               or not. Primary client votes will be redirected to
- *               display rsc.
- * @config:	 fps, vtotal, porches, etc configuration for command mode
- *               panel
- *
- * Return: client node pointer.
- */
-struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *name,
-		bool is_primary_display);
-
-/**
- * sde_rsc_client_destroy() - Destroy the sde rsc client.
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: none
- */
-void sde_rsc_client_destroy(struct sde_rsc_client *client);
-
-/**
- * sde_rsc_client_state_update() - rsc client state update
- * Video mode, cmd mode and clk state are supported as modes. A client need to
- * set this property during panel time. A switching client can set the
- * property to change the state
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- * @state:	 Client state - video/cmd
- * @config:	 fps, vtotal, porches, etc configuration for command mode
- *               panel
- * @crtc_id:	 current client's crtc id
- *
- * Return: error code.
- */
-int sde_rsc_client_state_update(struct sde_rsc_client *client,
-	enum sde_rsc_state state,
-	struct sde_rsc_cmd_config *config, int crtc_id);
-
-/**
- * sde_rsc_client_vote() - ab/ib vote from rsc client
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- * @ab:		 aggregated bandwidth vote from client.
- * @ib:		 instant bandwidth vote from client.
- *
- * Return: error code.
- */
-int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
-	u64 ab_vote, u64 ib_vote);
-
-/**
- * sde_rsc_hw_register() - register hardware API
- *
- * @client:	 Client pointer provided by sde_rsc_client_create().
- *
- * Return: error code.
- */
-int sde_rsc_hw_register(struct sde_rsc_priv *rsc);
-
-/**
- * sde_rsc_register_event - register a callback function for an event
- * @rsc_index:   A client will be created on this RSC. As of now only
- *               SDE_RSC_INDEX is valid rsc index.
- * @event_type:  event type to register; client sets 0x3 if it wants
- *               to register for CORE_PC and CORE_RESTORE - both events.
- * @cb_func:     Pointer to desired callback function
- * @usr:         User pointer to pass to callback on event trigger
- * Returns: sde_rsc_event pointer on success
- */
-struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
-		void (*cb_func)(uint32_t event_type, void *usr), void *usr);
-
-/**
- * sde_rsc_unregister_event - unregister callback for an event
- * @sde_rsc_event: event returned by sde_rsc_register_event
- */
-void sde_rsc_unregister_event(struct sde_rsc_event *event);
-
-#endif /* _SDE_RSC_H_ */
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index dd7f37a..fb963ee 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -17,7 +17,7 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 
-#include "sde_rsc.h"
+#include "sde_rsc_priv.h"
 
 /* display rsc offset */
 #define SDE_RSCC_PDC_SEQ_START_ADDR_REG_OFFSET_DRV0	0x020
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
new file mode 100644
index 0000000..2563c85
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -0,0 +1,181 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_RSC_PRIV_H_
+#define _SDE_RSC_PRIV_H_
+
+#include <linux/kernel.h>
+#include <linux/sde_io_util.h>
+#include <linux/sde_rsc.h>
+
+#include <soc/qcom/tcs.h>
+#include "sde_power_handle.h"
+
+#define SDE_RSC_COMPATIBLE "disp_rscc"
+
+#define MAX_RSC_COUNT		5
+
+struct sde_rsc_priv;
+
+/**
+ * rsc_mode_req: sde rsc mode request information
+ * MODE_READ: read vsync status
+ * MODE0_UPDATE: mode0 status, this should be 0x0
+ * MODE1_UPDATE: mode1 status, this should be 0x1
+ * MODE2_UPDATE: mode2 status, this should be 0x2
+ */
+enum rsc_mode_req {
+	MODE_READ,
+	MODE0_UPDATE = 0x1,
+	MODE1_UPDATE = 0x2,
+	MODE2_UPDATE = 0x3,
+};
+
+/**
+ * rsc_vsync_req: sde rsc vsync request information
+ * VSYNC_READ: read vsync status
+ * VSYNC_ENABLE: enable rsc wrapper vsync status
+ * VSYNC_DISABLE: disable rsc wrapper vsync status
+ */
+enum rsc_vsync_req {
+	VSYNC_READ,
+	VSYNC_ENABLE,
+	VSYNC_DISABLE,
+};
+
+/**
+ * struct sde_rsc_hw_ops - sde resource state coordinator hardware ops
+ * @init:			Initialize the sequencer, solver, qtimer,
+				etc. hardware blocks on RSC.
+ * @tcs_wait:			Waits for TCS block OK to allow sending a
+ *				TCS command.
+ * @hw_vsync:			Enables the vsync on RSC block.
+ * @tcs_use_ok:			set TCS set to high to allow RSC to use it.
+ * @mode2_entry:		Request to enter mode2 when all clients are
+ *                              requesting power collapse.
+ * @mode2_exit:			Request to exit mode2 when one of the clients
+ *                              is voting against power collapse.
+ * @is_amc_mode:		Check current amc mode status
+ * @state_update:		Enable/override the solver based on rsc state
+ *                              status (command/video)
+ * @mode_ctrl:			reads or updates current mode status, mode0/1/2
+ * @debug_show:			Show current debug status.
+ */
+
+struct sde_rsc_hw_ops {
+	int (*init)(struct sde_rsc_priv *rsc);
+	int (*tcs_wait)(struct sde_rsc_priv *rsc);
+	int (*hw_vsync)(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
+		char *buffer, int buffer_size, u32 mode);
+	int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
+	int (*mode2_entry)(struct sde_rsc_priv *rsc);
+	int (*mode2_exit)(struct sde_rsc_priv *rsc);
+	bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
+	int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
+	int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
+	int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
+		char *buffer, int buffer_size, bool mode);
+};
+
+/**
+ * struct sde_rsc_timer_config: this is internal configuration between
+ * rsc and rsc_hw API.
+ *
+ * @static_wakeup_time_ns:	wrapper backoff time in nano seconds
+ * @rsc_backoff_time_ns:	rsc backoff time in nano seconds
+ * @pdc_backoff_time_ns:	pdc backoff time in nano seconds
+ * @rsc_mode_threshold_time_ns:	rsc mode threshold time in nano seconds
+ * @rsc_time_slot_0_ns:		mode-0 time slot threshold in nano seconds
+ * @rsc_time_slot_1_ns:		mode-1 time slot threshold in nano seconds
+ * @rsc_time_slot_2_ns:		mode-2 time slot threshold in nano seconds
+ */
+struct sde_rsc_timer_config {
+	u32 static_wakeup_time_ns;
+
+	u32 rsc_backoff_time_ns;
+	u32 pdc_backoff_time_ns;
+	u32 rsc_mode_threshold_time_ns;
+	u32 rsc_time_slot_0_ns;
+	u32 rsc_time_slot_1_ns;
+	u32 rsc_time_slot_2_ns;
+};
+
+/**
+ * struct sde_rsc_priv: sde resource state coordinator(rsc) private handle
+ * @version:		rsc sequence version
+ * @phandle:		module power handle for clocks
+ * @pclient:		module power client of phandle
+ * @fs:			"MDSS GDSC" handle
+ *
+ * @drv_io:		sde drv io data mapping
+ * @wrapper_io:		wrapper io data mapping
+ *
+ * @client_list:	current rsc client list handle
+ * @event_list:		current rsc event list handle
+ * @client_lock:	current rsc client synchronization lock
+ *
+ * @timer_config:	current rsc timer configuration
+ * @cmd_config:		current panel config
+ * @current_state:	current rsc state (video/command), solver
+ *                      override/enabled.
+ * @debug_mode:		enables the logging for each register read/write
+ * @debugfs_root:	debugfs file system root node
+ *
+ * @hw_ops:		sde rsc hardware operations
+ * @power_collapse:	if all clients are in IDLE state then it enters
+ *			mode2 state and enables the power collapse state
+ * @power_collapse_block: By default, rsc moves to mode-2 if all clients are
+ *			in invalid state. It can be blocked by this boolean entry.
+ * @primary_client:	A client which is allowed to make command state requests
+ *			and ab/ib votes on display rsc
+ * @master_drm:		Primary client waits for vsync on this drm object based
+ *			on crtc id
+ */
+struct sde_rsc_priv {
+	u32 version;
+	struct sde_power_handle phandle;
+	struct sde_power_client *pclient;
+	struct regulator *fs;
+
+	struct dss_io_data drv_io;
+	struct dss_io_data wrapper_io;
+
+	struct list_head client_list;
+	struct list_head event_list;
+	struct mutex client_lock;
+
+	struct sde_rsc_timer_config timer_config;
+	struct sde_rsc_cmd_config cmd_config;
+	u32	current_state;
+
+	u32 debug_mode;
+	struct dentry *debugfs_root;
+
+	struct sde_rsc_hw_ops hw_ops;
+	bool power_collapse;
+	bool power_collapse_block;
+	struct sde_rsc_client *primary_client;
+
+	struct drm_device *master_drm;
+};
+
+/**
+ * sde_rsc_hw_register() - register hardware API
+ *
+ * @rsc:	 Pointer to the sde rsc private handle.
+ *
+ * Return: error code.
+ */
+int sde_rsc_hw_register(struct sde_rsc_priv *rsc);
+
+#endif /* _SDE_RSC_PRIV_H_ */
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8703f56..246d1ae 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -61,21 +61,24 @@
 		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT0);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
 		break;
 	case DRM_VC4_PARAM_V3D_IDENT1:
 		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
 		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT1);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
 		break;
 	case DRM_VC4_PARAM_V3D_IDENT2:
 		ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
 		if (ret < 0)
 			return ret;
 		args->value = V3D_READ(V3D_IDENT2);
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
 		break;
 	case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
 		args->value = true;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 18e3717..ab30169 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -711,8 +711,10 @@
 	}
 
 	mutex_lock(&vc4->power_lock);
-	if (--vc4->power_refcount == 0)
-		pm_runtime_put(&vc4->v3d->pdev->dev);
+	if (--vc4->power_refcount == 0) {
+		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+	}
 	mutex_unlock(&vc4->power_lock);
 
 	kfree(exec);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e6d3c60..7cc346a 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -222,6 +222,8 @@
 		return ret;
 	}
 
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
 	pm_runtime_enable(dev);
 
 	return 0;
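The 40 ms autosuspend delay keeps V3D powered across back-to-back frames (about 16.7 ms apart at 60 Hz) rather than cycling the power domain on every job; a minimal sketch of the runtime-PM pattern this series adopts, with dev standing in for &vc4->v3d->pdev->dev:

	ret = pm_runtime_get_sync(dev);		/* power up, or propagate the error */
	if (ret < 0)
		return ret;
	/* ... access V3D registers / run the job ... */
	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend window */
	pm_runtime_put_autosuspend(dev);	/* drop the ref; suspend after the delay */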
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2543cf5..917321c 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -608,9 +608,7 @@
 vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
 {
 	uint32_t max_branch_target = 0;
-	bool found_shader_end = false;
 	int ip;
-	int shader_end_ip = 0;
 	int last_branch = -2;
 
 	for (ip = 0; ip < validation_state->max_ip; ip++) {
@@ -621,8 +619,13 @@
 		uint32_t branch_target_ip;
 
 		if (sig == QPU_SIG_PROG_END) {
-			shader_end_ip = ip;
-			found_shader_end = true;
+			/* There are two delay slots after program end is
+			 * signaled that are still executed, then we're
+			 * finished.  validation_state->max_ip is the
+			 * instruction after the last valid instruction in the
+			 * program.
+			 */
+			validation_state->max_ip = ip + 3;
 			continue;
 		}
 
@@ -676,15 +679,9 @@
 		}
 		set_bit(after_delay_ip, validation_state->branch_targets);
 		max_branch_target = max(max_branch_target, after_delay_ip);
-
-		/* There are two delay slots after program end is signaled
-		 * that are still executed, then we're finished.
-		 */
-		if (found_shader_end && ip == shader_end_ip + 2)
-			break;
 	}
 
-	if (max_branch_target > shader_end_ip) {
+	if (max_branch_target > validation_state->max_ip - 3) {
 		DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
 		return false;
 	}
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index f50dae9..7cdd2b2 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -676,7 +676,7 @@
 #define A6XX_GMU_HOST_INTERRUPT_CLR		0x23B04
 #define A6XX_GMU_HOST_INTERRUPT_STATUS		0x23B05
 #define A6XX_GMU_HOST_INTERRUPT_MASK		0x23B06
-#define A6XX_GMU_GPU_CX_BUSY_STATUS		0x23B0C
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 787ca6b..1c37978 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1854,6 +1854,30 @@
 			status = 0;
 		}
 		break;
+	case KGSL_PROP_DEVICE_QTIMER:
+		{
+			struct kgsl_qtimer_prop qtimerprop = {0};
+			struct kgsl_memdesc *qtimer_desc =
+				kgsl_mmu_get_qtimer_global_entry(device);
+
+			if (sizebytes != sizeof(qtimerprop)) {
+				status = -EINVAL;
+				break;
+			}
+
+			if (qtimer_desc) {
+				qtimerprop.gpuaddr = qtimer_desc->gpuaddr;
+				qtimerprop.size = qtimer_desc->size;
+			}
+
+			if (copy_to_user(value, &qtimerprop,
+						sizeof(qtimerprop))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
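A hedged userspace sketch of reading the new property through the existing getproperty ioctl; it assumes the msm_kgsl.h UAPI header exposes struct kgsl_qtimer_prop and KGSL_PROP_DEVICE_QTIMER, and that fd is an open /dev/kgsl-3d0 handle:

	struct kgsl_qtimer_prop qtimer = { 0 };
	struct kgsl_device_getproperty req = {
		.type = KGSL_PROP_DEVICE_QTIMER,
		.value = &qtimer,
		.sizebytes = sizeof(qtimer),
	};

	if (ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &req) == 0)
		printf("qtimer gpuaddr 0x%llx size %llu\n",
		       (unsigned long long)qtimer.gpuaddr,
		       (unsigned long long)qtimer.size);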
 	case KGSL_PROP_MMU_ENABLE:
 		{
 			/* Report MMU only if we can handle paged memory */
@@ -2243,6 +2267,11 @@
 {
 	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
 	unsigned int reg_rbbm_status;
+	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
+
+	/* if hw driver implements idle check - use it */
+	if (gpudev->hw_isidle)
+		return gpudev->hw_isidle(adreno_dev);
 
 	if (adreno_is_a540(adreno_dev))
 		/**
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 1e08a5e..e23d6a0 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -852,10 +852,10 @@
 				unsigned int clear_mask);
 	void (*oob_clear)(struct adreno_device *adreno_dev,
 				unsigned int clear_mask);
-	bool (*hw_isidle)(struct adreno_device *);
 	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
 				unsigned int arg1, unsigned int arg2);
-	bool (*gmu_isidle)(struct adreno_device *);
+	bool (*hw_isidle)(struct adreno_device *);
+	int (*wait_for_gmu_idle)(struct adreno_device *);
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index ca0e79d..0211a17 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -193,6 +193,11 @@
 	unsigned int bit, mal, mode, glbl_inv;
 	unsigned int amsbc = 0;
 
+	/* runtime adjust callbacks based on feature sets */
+	if (!kgsl_gmu_isenabled(device))
+		/* Legacy idle management if gmu is disabled */
+		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
+
 	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
 			ARRAY_SIZE(a6xx_vbif_platforms));
 	/*
@@ -925,8 +930,6 @@
 	return ret;
 }
 
-#define GMU_POWER_STATE_SLUMBER 15
-
 /*
  * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
  * @device: Pointer to KGSL device
@@ -959,7 +962,7 @@
 		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
 	else {
 		kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &state);
-		if (state != GMU_POWER_STATE_SLUMBER) {
+		if (state != GPU_HW_SLUMBER) {
 			dev_err(&gmu->pdev->dev,
 					"Failed to prepare for slumber\n");
 			ret = -EINVAL;
@@ -1258,29 +1261,35 @@
 	return ret;
 }
 
-static bool a6xx_gmu_isidle(struct adreno_device *adreno_dev)
+static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
+{
+	unsigned int reg;
+
+	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
+		A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
+	return ((~reg & GPUBUSYIGNAHB) != 0);
+}
+
+static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = &device->gmu;
-	unsigned int value;
 
-	/* Check if GMU on */
-	if (!(gmu->flags & GMU_CLK_ON))
-		return true;
+	if (timed_poll_check(device, A6XX_GMU_RPMH_POWER_STATE,
+		gmu->idle_level, GMU_START_TIMEOUT, 0xf)) {
+		dev_err(&gmu->pdev->dev,
+			"GMU is not going to powerstate %d\n",
+			gmu->idle_level);
+		return -ETIMEDOUT;
+	}
 
-	/* Ensure GPU is in its lowest power state */
-	kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &value);
+	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
+		0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
+		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
+		return -ETIMEDOUT;
+	}
 
-	if (value < gmu->idle_level)
-		return false;
-
-	/* Ensure GPU and GMU are both idle */
-	kgsl_gmu_regread(device->reg_virt, A6XX_GMU_GPU_CX_BUSY_STATUS,
-			&value);
-	if ((value & SLUMBER_CHECK_MASK) != SLUMBER_CHECK_MASK)
-		return false;
-
-	return true;
+	return 0;
 }
 
 /*
@@ -2040,5 +2049,6 @@
 	.oob_set = a6xx_oob_set,
 	.oob_clear = a6xx_oob_clear,
 	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
-	.gmu_isidle = a6xx_gmu_isidle,
+	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
+	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle
 };
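
The new a6xx_wait_for_gmu_idle() leans on timed_poll_check(), which is defined outside this diff. A minimal sketch of what such a masked register poll typically looks like, assuming kgsl_gmu_regread() plus the usual jiffies/delay helpers; the name, signature and back-off interval are assumptions rather than the kgsl implementation:

	/*
	 * Sketch only: poll a GMU register until (value & mask) == expected,
	 * or give up after timeout_ms. Mirrors how a6xx_wait_for_gmu_idle()
	 * calls the helper; the real timed_poll_check() may differ.
	 */
	static int timed_poll_check_sketch(struct kgsl_device *device,
			unsigned int offset, unsigned int expected,
			unsigned int timeout_ms, unsigned int mask)
	{
		unsigned long expires = jiffies + msecs_to_jiffies(timeout_ms);
		unsigned int value;

		do {
			kgsl_gmu_regread(device, offset, &value);
			if ((value & mask) == expected)
				return 0;
			/* Back off briefly before sampling the register again */
			usleep_range(10, 100);
		} while (time_before(jiffies, expires));

		return -ETIMEDOUT;
	}
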
diff --git a/drivers/gpu/msm/adreno_compat.c b/drivers/gpu/msm/adreno_compat.c
index d86a0c6..5a8d587 100644
--- a/drivers/gpu/msm/adreno_compat.c
+++ b/drivers/gpu/msm/adreno_compat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -113,6 +113,30 @@
 			status = 0;
 		}
 		break;
+	case KGSL_PROP_DEVICE_QTIMER:
+		{
+			struct kgsl_qtimer_prop qtimerprop = {0};
+			struct kgsl_memdesc *qtimer_desc =
+				kgsl_mmu_get_qtimer_global_entry(device);
+
+			if (sizebytes != sizeof(qtimerprop)) {
+				status = -EINVAL;
+				break;
+			}
+
+			if (qtimer_desc) {
+				qtimerprop.gpuaddr = qtimer_desc->gpuaddr;
+				qtimerprop.size = qtimer_desc->size;
+			}
+
+			if (copy_to_user(value, &qtimerprop,
+						sizeof(qtimerprop))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
 	default:
 		/*
 		 * Call the adreno_getproperty to check if the property type
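
For reference, a hedged userspace sketch of reading the new property. It assumes the standard KGSL getproperty ioctl surface from the msm_kgsl UAPI header (IOCTL_KGSL_DEVICE_GETPROPERTY, struct kgsl_device_getproperty, the /dev/kgsl-3d0 node) and the kgsl_qtimer_prop layout (gpuaddr, size) filled in above; treat the exact names as assumptions:

	/* Sketch only: query the QTimer global GPU mapping from userspace. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/msm_kgsl.h>

	int main(void)
	{
		struct kgsl_qtimer_prop qtimer = { 0 };
		struct kgsl_device_getproperty prop = {
			.type = KGSL_PROP_DEVICE_QTIMER,
			.value = &qtimer,
			.sizebytes = sizeof(qtimer),
		};
		int fd = open("/dev/kgsl-3d0", O_RDWR);

		if (fd < 0 || ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop))
			return 1;

		/* gpuaddr stays 0 if no qcom,gpu-qtimer region was mapped */
		printf("qtimer gpuaddr=0x%llx size=%llu\n",
			(unsigned long long)qtimer.gpuaddr,
			(unsigned long long)qtimer.size);
		return 0;
	}
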
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 3fa38fa..4d1f1ad 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2055,12 +2055,13 @@
 		return 0;
 
 	/*
-	 * On A5xx, read RBBM_STATUS3:SMMU_STALLED_ON_FAULT (BIT 24) to
-	 * tell if this function was entered after a pagefault. If so, only
+	 * On A5xx and A6xx, read RBBM_STATUS3:SMMU_STALLED_ON_FAULT (BIT 24)
+	 * to tell if this function was entered after a pagefault. If so, only
 	 * proceed if the fault handler has already run in the IRQ thread,
 	 * else return early to give the fault handler a chance to run.
 	 */
-	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) && adreno_is_a5xx(adreno_dev)) {
+	if (!(fault & ADRENO_IOMMU_PAGE_FAULT) &&
+		(adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))) {
 		unsigned int val;
 
 		mutex_lock(&device->mutex);
@@ -2086,7 +2087,7 @@
 	 */
 	if (!(fault & ADRENO_HARD_FAULT)) {
 		adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, &reg);
-		if (adreno_is_a5xx(adreno_dev))
+		if (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))
 			reg |= 1 | (1 << 1);
 		else
 			reg |= (1 << 27) | (1 << 28);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 78182b7..32175f5 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -796,10 +796,10 @@
 		dwords += 6;
 
 		/*
-		 * REG_TO_MEM packet on A5xx needs another ordinal.
+		 * REG_TO_MEM packet on A5xx and above needs another ordinal.
 		 * Add 2 more dwords since we do profiling before and after.
 		 */
-		if (adreno_is_a5xx(adreno_dev))
+		if (!ADRENO_LEGACY_PM4(adreno_dev))
 			dwords += 2;
 
 		/*
@@ -816,7 +816,7 @@
 	if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) {
 		kernel_profiling = true;
 		dwords += 6;
-		if (adreno_is_a5xx(adreno_dev))
+		if (!ADRENO_LEGACY_PM4(adreno_dev))
 			dwords += 2;
 	}
 
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 56e4f23..2e9f108 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1086,7 +1086,7 @@
 
 	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);
 
-	gmu->idle_level = GPU_HW_CGC;
+	gmu->idle_level = GPU_HW_ACTIVE;
 
 	return 0;
 
@@ -1312,7 +1312,11 @@
 	if (!test_bit(GMU_CLK_ON, &gmu->flags))
 		return;
 
-	/* TODO: Check for conditions to enter slumber */
+	if (gpudev->wait_for_gmu_idle &&
+		gpudev->wait_for_gmu_idle(adreno_dev)) {
+		dev_err(&gmu->pdev->dev, "Failure to stop gmu\n");
+		return;
+	}
 
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
 
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index b5c0c96..ac2c151 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -53,11 +53,9 @@
 				CX_VOTE_ENABLE		| \
 				GFX_VOTE_ENABLE)
 
-/* Bitmask for GMU idle status check */
-#define CXGX_CPUBUSY_IGNAHB_IDLE	BIT(30)
-#define GPUBUSY_IGNAHB_IDLE		BIT(23)
-#define SLUMBER_CHECK_MASK		(CXGX_CPUBUSY_IGNAHB_IDLE  | \
-					GPUBUSY_IGNAHB_IDLE)
+/* Bitmask for GPU idle status check */
+#define GPUBUSYIGNAHB		BIT(23)
+#define CXGXCPUBUSYIGNAHB	BIT(30)
 
 /* Constants for GMU OOBs */
 #define OOB_BOOT_OPTION         0
@@ -143,12 +141,13 @@
 };
 
 enum gpu_idle_level {
-	GPU_HW_ACTIVE,
-	GPU_HW_CGC,
-	GPU_HW_SPTP_PC,
-	GPU_HW_IFPC,
-	GPU_HW_NAP,
-	GPU_HW_MIN_VOLT,
+	GPU_HW_ACTIVE = 0x0,
+	GPU_HW_SPTP_PC = 0x2,
+	GPU_HW_IFPC = 0x3,
+	GPU_HW_NAP = 0x4,
+	GPU_HW_MIN_VOLT = 0x5,
+	GPU_HW_MIN_DDR = 0x6,
+	GPU_HW_SLUMBER = 0xF
 };
 
 /**
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index cfd5cd1..0325db8 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -113,6 +113,7 @@
 static int global_pt_count;
 uint64_t global_pt_alloc;
 static struct kgsl_memdesc gpu_qdss_desc;
+static struct kgsl_memdesc gpu_qtimer_desc;
 
 void kgsl_print_global_pt_entries(struct seq_file *s)
 {
@@ -272,6 +273,50 @@
 	kgsl_sharedmem_free(&gpu_qdss_desc);
 }
 
+struct kgsl_memdesc *kgsl_iommu_get_qtimer_global_entry(void)
+{
+	return &gpu_qtimer_desc;
+}
+
+static void kgsl_setup_qtimer_desc(struct kgsl_device *device)
+{
+	int result = 0;
+	uint32_t gpu_qtimer_entry[2];
+
+	if (!of_find_property(device->pdev->dev.of_node,
+		"qcom,gpu-qtimer", NULL))
+		return;
+
+	if (of_property_read_u32_array(device->pdev->dev.of_node,
+				"qcom,gpu-qtimer", gpu_qtimer_entry, 2)) {
+		KGSL_CORE_ERR("Failed to read gpu qtimer dts entry\n");
+		return;
+	}
+
+	gpu_qtimer_desc.flags = 0;
+	gpu_qtimer_desc.priv = 0;
+	gpu_qtimer_desc.physaddr = gpu_qtimer_entry[0];
+	gpu_qtimer_desc.size = gpu_qtimer_entry[1];
+	gpu_qtimer_desc.pagetable = NULL;
+	gpu_qtimer_desc.ops = NULL;
+	gpu_qtimer_desc.dev = device->dev->parent;
+	gpu_qtimer_desc.hostptr = NULL;
+
+	result = memdesc_sg_dma(&gpu_qtimer_desc, gpu_qtimer_desc.physaddr,
+			gpu_qtimer_desc.size);
+	if (result) {
+		KGSL_CORE_ERR("memdesc_sg_dma failed: %d\n", result);
+		return;
+	}
+
+	kgsl_mmu_add_global(device, &gpu_qtimer_desc, "gpu-qtimer");
+}
+
+static inline void kgsl_cleanup_qtimer_desc(struct kgsl_mmu *mmu)
+{
+	kgsl_iommu_remove_global(mmu, &gpu_qtimer_desc);
+	kgsl_sharedmem_free(&gpu_qtimer_desc);
+}
 
 static inline void _iommu_sync_mmu_pc(bool lock)
 {
@@ -1452,6 +1497,7 @@
 	kgsl_iommu_remove_global(mmu, &iommu->setstate);
 	kgsl_sharedmem_free(&iommu->setstate);
 	kgsl_cleanup_qdss_desc(mmu);
+	kgsl_cleanup_qtimer_desc(mmu);
 }
 
 static int _setstate_alloc(struct kgsl_device *device,
@@ -1523,6 +1569,7 @@
 
 	kgsl_iommu_add_global(mmu, &iommu->setstate, "setstate");
 	kgsl_setup_qdss_desc(device);
+	kgsl_setup_qtimer_desc(device);
 
 done:
 	if (status)
@@ -2671,6 +2718,7 @@
 	.mmu_remove_global = kgsl_iommu_remove_global,
 	.mmu_getpagetable = kgsl_iommu_getpagetable,
 	.mmu_get_qdss_global_entry = kgsl_iommu_get_qdss_global_entry,
+	.mmu_get_qtimer_global_entry = kgsl_iommu_get_qtimer_global_entry,
 	.probe = kgsl_iommu_probe,
 };
 
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 9e516e1..8ea4492 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -617,6 +617,18 @@
 }
 EXPORT_SYMBOL(kgsl_mmu_get_qdss_global_entry);
 
+struct kgsl_memdesc *kgsl_mmu_get_qtimer_global_entry(
+		struct kgsl_device *device)
+{
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (MMU_OP_VALID(mmu, mmu_get_qtimer_global_entry))
+		return mmu->mmu_ops->mmu_get_qtimer_global_entry();
+
+	return NULL;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_qtimer_global_entry);
+
 /*
  * NOMMU definitions - NOMMU really just means that the MMU is kept in pass
  * through and the GPU directly accesses physical memory. Used in debug mode
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 0f9f486..56bb317 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -83,6 +83,7 @@
 	struct kgsl_pagetable * (*mmu_getpagetable)(struct kgsl_mmu *mmu,
 			unsigned long name);
 	struct kgsl_memdesc* (*mmu_get_qdss_global_entry)(void);
+	struct kgsl_memdesc* (*mmu_get_qtimer_global_entry)(void);
 };
 
 struct kgsl_mmu_pt_ops {
@@ -233,6 +234,9 @@
 
 struct kgsl_memdesc *kgsl_mmu_get_qdss_global_entry(struct kgsl_device *device);
 
+struct kgsl_memdesc *kgsl_mmu_get_qtimer_global_entry(
+		struct kgsl_device *device);
+
 int kgsl_mmu_sparse_dummy_map(struct kgsl_pagetable *pagetable,
 		struct kgsl_memdesc *memdesc, uint64_t offset, uint64_t size);
 
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index be34547..1606e7f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -506,12 +506,15 @@
 
 	wait_for_completion(&info->waitevent);
 
-	if (channel->rescind) {
-		ret = -ENODEV;
-		goto post_msg_err;
-	}
-
 post_msg_err:
+	/*
+	 * If the channel has been rescinded, we will be awakened by the
+	 * rescind handler; set the error code to zero so we don't leak
+	 * memory.
+	 */
+	if (channel->rescind)
+		ret = 0;
+
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 	list_del(&info->msglistentry);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index cb95315..d8bc4b9 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -779,6 +779,7 @@
 	/* Allocate the channel object and save this offer. */
 	newchannel = alloc_channel();
 	if (!newchannel) {
+		vmbus_release_relid(offer->child_relid);
 		pr_err("Unable to allocate channel object\n");
 		return;
 	}
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 6f0a51a..d439736 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -218,8 +218,10 @@
 	else
 		intel_th_trace_enable(thdev);
 
-	if (ret)
+	if (ret) {
 		pm_runtime_put(&thdev->dev);
+		module_put(thdrv->driver.owner);
+	}
 
 	return ret;
 }
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index d7b4363..58e8850 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -173,7 +173,7 @@
 		u32 m_param = 0;
 		u32 m_cmd = 0;
 
-		m_param |= (stretch ? STOP_STRETCH : ~(STOP_STRETCH));
+		m_param |= (stretch ? STOP_STRETCH : 0);
 		m_param |= ((msgs[i].addr & 0x7F) << SLV_ADDR_SHFT);
 
 		gi2c->cur = &msgs[i];
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c3cfacc..2de1f52 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -151,7 +151,9 @@
 {
 	struct iio_dev *indio_dev = private;
 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
-	unsigned int status, config;
+	unsigned int status, config, adc_fsm;
+	unsigned short count = 0;
+
 	status = tiadc_readl(adc_dev, REG_IRQSTATUS);
 
 	/*
@@ -165,6 +167,15 @@
 		tiadc_writel(adc_dev, REG_CTRL, config);
 		tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
 				| IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
+
+		/*
+		 * Wait for the idle state: the ADC needs to finish the
+		 * current conversion before the module is disabled.
+		 */
+		do {
+			adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
+		} while (adc_fsm != 0x10 && count++ < 100);
+
 		tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
 		return IRQ_HANDLED;
 	} else if (status & IRQENB_FIFO1THRES) {
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index a3cce3a..ecf592d 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,8 +51,6 @@
 			st->report_state.report_id,
 			st->report_state.index,
 			HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
-
-		poll_value = hid_sensor_read_poll_value(st);
 	} else {
 		int val;
 
@@ -89,7 +87,9 @@
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);
-	if (state && poll_value)
+	if (state)
+		poll_value = hid_sensor_read_poll_value(st);
+	if (poll_value > 0)
 		msleep_interruptible(poll_value * 2);
 
 	return 0;
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 2173531..dd3fcd1 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -767,7 +767,7 @@
 	return ret;
 }
 
-static int __exit ak8974_remove(struct i2c_client *i2c)
+static int ak8974_remove(struct i2c_client *i2c)
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
 	struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -849,7 +849,7 @@
 		.of_match_table = of_match_ptr(ak8974_of_match),
 	},
 	.probe	  = ak8974_probe,
-	.remove	  = __exit_p(ak8974_remove),
+	.remove	  = ak8974_remove,
 	.id_table = ak8974_id,
 };
 module_i2c_driver(ak8974_driver);
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27..db64adf 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@
 
 	interface = intf->cur_altsetting;
 
+	if (interface->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	epirq = &interface->endpoint[0].desc;
 	epout = &interface->endpoint[1].desc;
 
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9cc6d05..23c191a 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -700,6 +700,10 @@
 	int error = -ENOMEM;
 
 	interface = intf->cur_altsetting;
+
+	if (interface->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	endpoint = &interface->endpoint[0].desc;
 
 	if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 9c0ea36..f4e8fbe 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1667,6 +1667,10 @@
 		return -EINVAL;
 
 	alt = pcu->ctrl_intf->cur_altsetting;
+
+	if (alt->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	pcu->ep_ctrl = &alt->endpoint[0].desc;
 	pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
 
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 79c964c..6e7ff95 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -875,6 +875,10 @@
 	int ret, pipe, i;
 
 	interface = intf->cur_altsetting;
+
+	if (interface->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	endpoint = &interface->endpoint[0].desc;
 	if (!usb_endpoint_is_int_in(endpoint))
 		return -ENODEV;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index b93fe83..518e8a7 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1290,10 +1290,8 @@
 	/* handle buttons */
 	if (pkt_id == SS4_PACKET_ID_STICK) {
 		f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
-		if (!(priv->flags & ALPS_BUTTONPAD)) {
-			f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
-			f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
-		}
+		f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+		f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
 	} else {
 		f->left = !!(SS4_BTN_V2(p) & 0x01);
 		if (!(priv->flags & ALPS_BUTTONPAD)) {
@@ -2461,14 +2459,34 @@
 	int num_y_electrode;
 	int x_pitch, y_pitch, x_phys, y_phys;
 
-	num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
-	num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+	if (IS_SS4PLUS_DEV(priv->dev_id)) {
+		num_x_electrode =
+			SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F);
+		num_y_electrode =
+			SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F);
 
-	priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
-	priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+		priv->x_max =
+			(num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
+		priv->y_max =
+			(num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE;
 
-	x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
-	y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+		x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM;
+		y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM;
+
+	} else {
+		num_x_electrode =
+			SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F);
+		num_y_electrode =
+			SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F);
+
+		priv->x_max =
+			(num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+		priv->y_max =
+			(num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE;
+
+		x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM;
+		y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM;
+	}
 
 	x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */
 	y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */
@@ -2484,7 +2502,10 @@
 {
 	unsigned char is_btnless;
 
-	is_btnless = (otp[1][1] >> 3) & 0x01;
+	if (IS_SS4PLUS_DEV(priv->dev_id))
+		is_btnless = (otp[1][0] >> 1) & 0x01;
+	else
+		is_btnless = (otp[1][1] >> 3) & 0x01;
 
 	if (is_btnless)
 		priv->flags |= ALPS_BUTTONPAD;
@@ -2492,6 +2513,21 @@
 	return 0;
 }
 
+static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
+				       struct alps_data *priv)
+{
+	bool is_dual = false;
+
+	if (IS_SS4PLUS_DEV(priv->dev_id))
+		is_dual = (otp[0][0] >> 4) & 0x01;
+
+	if (is_dual)
+		priv->flags |= ALPS_DUALPOINT |
+					ALPS_DUALPOINT_WITH_PRESSURE;
+
+	return 0;
+}
+
 static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
 				    struct alps_data *priv)
 {
@@ -2507,6 +2543,8 @@
 
 	alps_update_btn_info_ss4_v2(otp, priv);
 
+	alps_update_dual_info_ss4_v2(otp, priv);
+
 	return 0;
 }
 
@@ -2752,10 +2790,6 @@
 		if (alps_set_defaults_ss4_v2(psmouse, priv))
 			return -EIO;
 
-		if (priv->fw_ver[1] == 0x1)
-			priv->flags |= ALPS_DUALPOINT |
-					ALPS_DUALPOINT_WITH_PRESSURE;
-
 		break;
 	}
 
@@ -2826,10 +2860,7 @@
 			   ec[2] >= 0x90 && ec[2] <= 0x9d) {
 			protocol = &alps_v3_protocol_data;
 		} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
-			   e7[2] == 0x14 && ec[1] == 0x02) {
-			protocol = &alps_v8_protocol_data;
-		} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
-			   e7[2] == 0x28 && ec[1] == 0x01) {
+			   (e7[2] == 0x14 || e7[2] == 0x28)) {
 			protocol = &alps_v8_protocol_data;
 		} else {
 			psmouse_dbg(psmouse,
@@ -2839,7 +2870,8 @@
 	}
 
 	if (priv) {
-		/* Save the Firmware version */
+		/* Save Device ID and Firmware version */
+		memcpy(priv->dev_id, e7, 3);
 		memcpy(priv->fw_ver, ec, 3);
 		error = alps_set_protocol(psmouse, priv, protocol);
 		if (error)
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index b9417e2..dbfd260 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,6 +54,16 @@
 
 #define SS4_MASK_NORMAL_BUTTONS		0x07
 
+#define SS4PLUS_COUNT_PER_ELECTRODE	128
+#define SS4PLUS_NUMSENSOR_XOFFSET	16
+#define SS4PLUS_NUMSENSOR_YOFFSET	5
+#define SS4PLUS_MIN_PITCH_MM		37
+
+#define IS_SS4PLUS_DEV(_b)	(((_b[0]) == 0x73) &&	\
+				 ((_b[1]) == 0x03) &&	\
+				 ((_b[2]) == 0x28)		\
+				)
+
 #define SS4_1F_X_V2(_b)		((_b[0] & 0x0007) |		\
 				 ((_b[1] << 3) & 0x0078) |	\
 				 ((_b[1] << 2) & 0x0380) |	\
@@ -263,6 +273,7 @@
 	int addr_command;
 	u16 proto_version;
 	u8 byte0, mask0;
+	u8 dev_id[3];
 	u8 fw_ver[3];
 	int flags;
 	int x_max;
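
A worked example of the SS4-plus geometry math above, using hypothetical OTP bytes otp[0][2] = 0x36 and otp[0][1] = 0x24 purely for illustration:

	num_x_electrode = 16 + (0x36 & 0x0F)        = 16 + 6 = 22
	num_y_electrode =  5 + ((0x36 >> 4) & 0x0F) =  5 + 3 =  8
	x_max   = (22 - 1) * 128 = 2688,   y_max   = (8 - 1) * 128 = 896
	x_pitch = (0x24 & 0x0F) + 37 = 41, y_pitch = ((0x24 >> 4) & 0x0F) + 37 = 39
	x_phys  = 41 * 21 = 861 (86.1 mm), y_phys  = 39 * 7 = 273 (27.3 mm)
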
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index ed1935f..da5458d 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -218,17 +218,19 @@
 
 static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
 {
-	if (data->ic_type != 0x0E)
-		return false;
-
-	switch (data->product_id) {
-	case 0x05 ... 0x07:
-	case 0x09:
-	case 0x13:
+	if (data->ic_type == 0x0E) {
+		switch (data->product_id) {
+		case 0x05 ... 0x07:
+		case 0x09:
+		case 0x13:
+			return true;
+		}
+	} else if (data->ic_type == 0x08 && data->product_id == 0x26) {
+		/* ASUS EeeBook X205TA */
 		return true;
-	default:
-		return false;
 	}
+
+	return false;
 }
 
 static int __elan_initialize(struct elan_tp_data *data)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 0cdd958..25eab45 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@
 		},
 	},
 	{
+		/* Dell Embedded Box PC 3000 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+		},
+	},
+	{
 		/* OQO Model 01 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index cd85205..df4bea9 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -340,6 +340,9 @@
 	int error;
 	int i;
 
+	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
 	input_dev = input_allocate_device();
 	if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index e850d7e..4d9d649 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,6 +122,9 @@
 	struct input_dev *input_dev;
 	int error = -ENOMEM;
 
+	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
 	input_dev = input_allocate_device();
 	if (!kbtab || !input_dev)
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index aefb6e1..4c0eeca 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -527,6 +527,9 @@
 	if (iface_desc->desc.bInterfaceClass != 0xFF)
 		return -ENODEV;
 
+	if (iface_desc->desc.bNumEndpoints < 5)
+		return -ENODEV;
+
 	/* Use endpoint #4 (0x86). */
 	endpoint = &iface_desc->endpoint[4].desc;
 	if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 37dfe0a..0850563 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -322,9 +322,6 @@
 	void (*device_reset)(struct arm_smmu_device *smmu);
 	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
 					 dma_addr_t iova);
-	void (*iova_to_phys_fault)(struct iommu_domain *domain,
-				dma_addr_t iova, phys_addr_t *phys1,
-				phys_addr_t *phys_post_tlbiall);
 };
 
 struct arm_smmu_impl_def_reg {
@@ -499,6 +496,7 @@
 
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
+	struct device			*dev;
 	struct io_pgtable_ops		*pgtbl_ops;
 	struct io_pgtable_cfg		pgtbl_cfg;
 	spinlock_t			pgtbl_lock;
@@ -1141,32 +1139,21 @@
 					 dma_addr_t iova, u32 fsr)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	phys_addr_t phys;
 	phys_addr_t phys_post_tlbiall;
 
-	smmu = smmu_domain->smmu;
-
-	if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
-		smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
-		&phys_post_tlbiall);
-	} else {
-		phys = arm_smmu_iova_to_phys_hard(domain, iova);
-		arm_smmu_tlb_inv_context(smmu_domain);
-		phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
-	}
+	phys = arm_smmu_iova_to_phys_hard(domain, iova);
+	arm_smmu_tlb_inv_context(smmu_domain);
+	phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
 
 	if (phys != phys_post_tlbiall) {
 		dev_err(smmu->dev,
 			"ATOS results differed across TLBIALL...\n"
 			"Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
 	}
-	if (!phys_post_tlbiall) {
-		dev_err(smmu->dev,
-			"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
-	}
 
-	return phys_post_tlbiall;
+	return (phys == 0 ? phys_post_tlbiall : phys);
 }
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -1260,8 +1247,11 @@
 				dev_err(smmu->dev,
 					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
 					dev_name(smmu->dev));
-			dev_err(smmu->dev,
-				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
+			if (phys_atos)
+				dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
+					&phys_atos);
+			else
+				dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
 			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
 		}
 		ret = IRQ_NONE;
@@ -1493,7 +1483,8 @@
 }
 
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
-					struct arm_smmu_device *smmu)
+					struct arm_smmu_device *smmu,
+					struct device *dev)
 {
 	int irq, start, ret = 0;
 	unsigned long ias, oas;
@@ -1642,6 +1633,7 @@
 	};
 
 	smmu_domain->smmu = smmu;
+	smmu_domain->dev = dev;
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
 					smmu_domain);
 	if (!pgtbl_ops) {
@@ -2130,7 +2122,7 @@
 		return ret;
 
 	/* Ensure that the domain is finalised */
-	ret = arm_smmu_init_domain_context(domain, smmu);
+	ret = arm_smmu_init_domain_context(domain, smmu, dev);
 	if (ret < 0)
 		goto out_power_off;
 
@@ -2331,9 +2323,11 @@
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
 	if (smmu_domain->smmu->arch_ops &&
-	    smmu_domain->smmu->arch_ops->iova_to_phys_hard)
-		return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
+	    smmu_domain->smmu->arch_ops->iova_to_phys_hard) {
+		ret = smmu_domain->smmu->arch_ops->iova_to_phys_hard(
 						domain, iova);
+		return ret;
+	}
 
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
 	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
@@ -3068,64 +3062,27 @@
 	qsmmuv2_resume(smmu);
 }
 
-static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
-					      dma_addr_t iova, bool halt)
+static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
+				dma_addr_t iova)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	int ret;
 	phys_addr_t phys = 0;
 	unsigned long flags;
+	u32 sctlr, sctlr_orig, fsr;
+	void __iomem *cb_base;
 
 	ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
 	if (ret)
-		return 0;
+		return ret;
 
-	if (halt) {
-		ret = qsmmuv2_halt(smmu);
-		if (ret)
-			goto out_power_off;
-	}
-
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	spin_lock(&smmu->atos_lock);
-	phys = __arm_smmu_iova_to_phys_hard(domain, iova);
-	spin_unlock(&smmu->atos_lock);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
-
-	if (halt)
-		qsmmuv2_resume(smmu);
-
-out_power_off:
-	arm_smmu_power_off(smmu_domain->smmu->pwr);
-	return phys;
-}
-
-static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
-					      dma_addr_t iova)
-{
-	return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
-}
-
-static void qsmmuv2_iova_to_phys_fault(
-				struct iommu_domain *domain,
-				dma_addr_t iova, phys_addr_t *phys,
-				phys_addr_t *phys_post_tlbiall)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	struct arm_smmu_device *smmu;
-	void __iomem *cb_base;
-	u64 sctlr, sctlr_orig;
-	u32 fsr;
-
-	smmu = smmu_domain->smmu;
-	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	spin_lock_irqsave(&smmu->atos_lock, flags);
+	cb_base = ARM_SMMU_CB_BASE(smmu) +
+			ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
 
 	qsmmuv2_halt_nowait(smmu);
-
 	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
-
 	qsmmuv2_wait_for_halt(smmu);
 
 	/* clear FSR to allow ATOS to log any faults */
@@ -3137,20 +3094,21 @@
 	sctlr = sctlr_orig & ~SCTLR_CFCFG;
 	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
 
-	*phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
-	arm_smmu_tlb_inv_context(smmu_domain);
-	*phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
+	phys = __arm_smmu_iova_to_phys_hard(domain, iova);
 
 	/* restore SCTLR */
 	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
 
 	qsmmuv2_resume(smmu);
+	spin_unlock_irqrestore(&smmu->atos_lock, flags);
+
+	arm_smmu_power_off(smmu_domain->smmu->pwr);
+	return phys;
 }
 
 struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
 	.device_reset = qsmmuv2_device_reset,
 	.iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
-	.iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
 };
 
 static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
@@ -3996,6 +3954,10 @@
 
 #define TBU_DBG_TIMEOUT_US		30000
 
+struct qsmmuv500_archdata {
+	struct list_head		tbus;
+};
+
 struct qsmmuv500_tbu_device {
 	struct list_head		list;
 	struct device			*dev;
@@ -4013,10 +3975,10 @@
 static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
 {
 	struct qsmmuv500_tbu_device *tbu;
-	struct list_head *list = smmu->archdata;
+	struct qsmmuv500_archdata *data = smmu->archdata;
 	int ret = 0;
 
-	list_for_each_entry(tbu, list, list) {
+	list_for_each_entry(tbu, &data->tbus, list) {
 		ret = arm_smmu_power_on(tbu->pwr);
 		if (ret)
 			break;
@@ -4024,7 +3986,7 @@
 	if (!ret)
 		return 0;
 
-	list_for_each_entry_continue_reverse(tbu, list, list) {
+	list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
 		arm_smmu_power_off(tbu->pwr);
 	}
 	return ret;
@@ -4033,9 +3995,9 @@
 static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
 {
 	struct qsmmuv500_tbu_device *tbu;
-	struct list_head *list = smmu->archdata;
+	struct qsmmuv500_archdata *data = smmu->archdata;
 
-	list_for_each_entry_reverse(tbu, list, list) {
+	list_for_each_entry_reverse(tbu, &data->tbus, list) {
 		arm_smmu_power_off(tbu->pwr);
 	}
 }
@@ -4101,10 +4063,10 @@
 static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
 {
 	struct qsmmuv500_tbu_device *tbu;
-	struct list_head *list = smmu->archdata;
+	struct qsmmuv500_archdata *data = smmu->archdata;
 	int ret = 0;
 
-	list_for_each_entry(tbu, list, list) {
+	list_for_each_entry(tbu, &data->tbus, list) {
 		ret = qsmmuv500_tbu_halt(tbu);
 		if (ret)
 			break;
@@ -4113,7 +4075,7 @@
 	if (!ret)
 		return 0;
 
-	list_for_each_entry_continue_reverse(tbu, list, list) {
+	list_for_each_entry_continue_reverse(tbu, &data->tbus, list) {
 		qsmmuv500_tbu_resume(tbu);
 	}
 	return ret;
@@ -4122,9 +4084,9 @@
 static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
 {
 	struct qsmmuv500_tbu_device *tbu;
-	struct list_head *list = smmu->archdata;
+	struct qsmmuv500_archdata *data = smmu->archdata;
 
-	list_for_each_entry(tbu, list, list) {
+	list_for_each_entry(tbu, &data->tbus, list) {
 		qsmmuv500_tbu_resume(tbu);
 	}
 }
@@ -4169,15 +4131,15 @@
 static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
 {
 	struct device *dev = smmu->dev;
-	struct list_head *list;
+	struct qsmmuv500_archdata *data;
 	int ret;
 
-	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
-	if (!list)
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(list);
-	smmu->archdata = list;
+	INIT_LIST_HEAD(&data->tbus);
+	smmu->archdata = data;
 
 	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 	if (ret)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 34be95e..b9e50c1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -915,7 +915,7 @@
 				 * which we used for the IOMMU lookup. Strictly speaking
 				 * we could do this for all PCI devices; we only need to
 				 * get the BDF# from the scope table for ACPI matches. */
-				if (pdev->is_virtfn)
+				if (pdev && pdev->is_virtfn)
 					goto got_pdev;
 
 				*bus = drhd->devices[i].bus;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d..7f1c625 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@
 		return -ENODEV;
 	}
 
+	if (hostif->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	dev_info(&udev->dev,
 		 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
 		 __func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 01e553c..b045e3b 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -1077,6 +1077,8 @@
 			pr_err("trigger lmh mitigation failed, rc=%d\n", rc);
 			return rc;
 		}
+		/* Wait for LMH mitigation to take effect */
+		udelay(500);
 	}
 
 	if (led->trigger_chgr) {
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c041db6..0db8a6d 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -151,4 +151,11 @@
 	  Support for communication with the hardened-RPM blocks in
 	  Qualcomm Technologies Inc (QTI) SoCs using TCS hardware mailbox.
 
+config MSM_QMP
+	bool "QTI Mailbox Protocol (QMP)"
+	depends on MSM_SMEM
+	help
+	  QMP is a lightweight communication protocol for sending messages to
+	  a remote processor. This protocol fits into the Generic Mailbox
+	  Framework. QMP uses a mailbox located in shared memory.
 endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 0a01d79..3c811d3 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -31,3 +31,5 @@
 obj-$(CONFIG_BCM_PDC_MBOX)	+= bcm-pdc-mailbox.o
 
 obj-$(CONFIG_QTI_RPMH_MBOX)	+= qti-tcs.o
+
+obj-$(CONFIG_MSM_QMP)	+= msm_qmp.o
diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c
new file mode 100644
index 0000000..dd022d3
--- /dev/null
+++ b/drivers/mailbox/msm_qmp.c
@@ -0,0 +1,811 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/mailbox/qmp.h>
+
+#define QMP_MAGIC	0x4d41494c	/* MAIL */
+#define QMP_VERSION	0x1
+#define QMP_FEATURES	0x0
+#define QMP_NUM_CHANS	0x1
+#define QMP_TOUT_MS	5000
+#define QMP_TX_TOUT_MS	2000
+
+#define QMP_MBOX_LINK_DOWN		0xFFFF0000
+#define QMP_MBOX_LINK_UP		0x0000FFFF
+#define QMP_MBOX_CH_DISCONNECTED	0xFFFF0000
+#define QMP_MBOX_CH_CONNECTED		0x0000FFFF
+
+#define MSG_RAM_ALIGN_BYTES 3
+
+/**
+ * enum qmp_local_state - definition of the local state machine
+ * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
+ * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
+ * @LINK_CONNECTED:		Link state up, channel not connected
+ * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
+ * @LOCAL_CONNECTED:		Channel opened locally
+ * @CHANNEL_CONNECTED:		Channel fully opened
+ * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
+ */
+enum qmp_local_state {
+	LINK_DISCONNECTED,
+	LINK_NEGOTIATION,
+	LINK_CONNECTED,
+	LOCAL_CONNECTING,
+	LOCAL_CONNECTED,
+	CHANNEL_CONNECTED,
+	LOCAL_DISCONNECTING,
+};
+
+/**
+ * struct channel_desc - description of a core's link, channel and mailbox state
+ * @link_state:		Current link state of core
+ * @link_state_ack:	Ack for other core to use when link state changes
+ * @ch_state:		Current channel state of core
+ * @ch_state_ack:	Ack for other core to use when channel state changes
+ * @mailbox_size:	Size of this core's mailbox
+ * @mailbox_offset:	Location of core's mailbox from a base smem location
+ */
+struct channel_desc {
+	u32 link_state;
+	u32 link_state_ack;
+	u32 ch_state;
+	u32 ch_state_ack;
+	u32 mailbox_size;
+	u32 mailbox_offset;
+};
+
+/**
+ * struct mbox_desc - description of the protocol's mailbox state
+ * @magic:	Magic number field to be set by ucore
+ * @version:	Version field to be set by ucore
+ * @features:	Features field to be set by ucore
+ * @ucore:	Channel descriptor to hold state of ucore
+ * @mcore:	Channel descriptor to hold state of mcore
+ * @reserved:	Reserved in case of future use
+ *
+ * This structure resides in SMEM and contains the control information for the
+ * mailbox channel. Each core in the link will have one channel descriptor.
+ */
+struct mbox_desc {
+	u32 magic;
+	u32 version;
+	u32 features;
+	struct channel_desc ucore;
+	struct channel_desc mcore;
+	u32 reserved;
+};
+
+/**
+ * struct qmp_core_version - local structure to hold version and features
+ * @version:	Version field to indicate what version the ucore supports
+ * @features:	Features field to indicate what features the ucore supports
+ */
+struct qmp_core_version {
+	u32 version;
+	u32 features;
+};
+
+/**
+ * struct qmp_device - local information for managing a single mailbox
+ * @dev:		The device that corresponds to this mailbox
+ * @mbox:		The mbox controller for this mailbox
+ * @name:		The name of this mailbox
+ * @local_state:	Current state of the mailbox protocol
+ * @link_complete:	Used to block until link negotiation with the remote
+ *			proc is complete
+ * @ch_complete:	Used to block until the channel is fully opened
+ * @tx_sent:		True if tx is sent and remote proc has not sent ack
+ * @ch_in_use:		True if this mailbox's channel is owned by a client
+ * @rx_pkt:		Buffer to pass to client, holds copied data from mailbox
+ * @version:		Version and features received during link negotiation
+ * @mcore_mbox_offset:	Offset of mcore mbox from the msgram start
+ * @mcore_mbox_size:	Size of the mcore mbox
+ * @desc:		Reference to the mailbox descriptor in SMEM
+ * @msgram:		Reference to the start of msgram
+ * @irq_mask:		Mask written to @tx_irq_reg to trigger irq
+ * @tx_irq_reg:		Reference to the register to send an irq to remote proc
+ * @rx_reset_reg:	Reference to the register to reset the rx irq, if
+ *			applicable
+ * @rx_irq_line:	The incoming interrupt line
+ * @tx_irq_count:	Number of tx interrupts triggered
+ * @rx_irq_count:	Number of rx interrupts received
+ * @kwork:		Work to be executed when an irq is received
+ * @kworker:		Handle to the entity that processes incoming data
+ * @task:		Handle to task context used to run @kworker
+ * @state_lock:		Serialize mailbox state changes
+ * @dwork:		Delayed work to detect timed out tx
+ * @tx_lock:		Serialize access for writes to mailbox
+ */
+struct qmp_device {
+	struct device *dev;
+	struct mbox_controller *mbox;
+	const char *name;
+	enum qmp_local_state local_state;
+	struct completion link_complete;
+	struct completion ch_complete;
+	bool tx_sent;
+	bool ch_in_use;
+	struct qmp_pkt rx_pkt;
+	struct qmp_core_version version;
+	u32 mcore_mbox_offset;
+	u32 mcore_mbox_size;
+	void __iomem *desc;
+	void __iomem *msgram;
+	u32 irq_mask;
+	void __iomem *tx_irq_reg;
+	void __iomem *rx_reset_reg;
+	u32 rx_irq_line;
+	u32 tx_irq_count;
+	u32 rx_irq_count;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct mutex state_lock;
+	struct delayed_work dwork;
+	spinlock_t tx_lock;
+};
+
+/**
+ * send_irq() - send an irq to a remote entity as an event signal.
+ * @mdev:	The mailbox device whose remote peer should receive the irq.
+ */
+static void send_irq(struct qmp_device *mdev)
+{
+	/*
+	 * Any data associated with this event must be visible to the remote
+	 * before the interrupt is triggered
+	 */
+	wmb();
+	writel_relaxed(mdev->irq_mask, mdev->tx_irq_reg);
+	mdev->tx_irq_count++;
+}
+
+/**
+ * qmp_irq_handler() - handle irq from remote entity.
+ * @irq:	irq number for the triggered interrupt.
+ * @priv:	private pointer to qmp mbox device.
+ */
+irqreturn_t qmp_irq_handler(int irq, void *priv)
+{
+	struct qmp_device *mdev = (struct qmp_device *)priv;
+
+	if (mdev->rx_reset_reg)
+		writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg);
+
+	kthread_queue_work(&mdev->kworker, &mdev->kwork);
+	mdev->rx_irq_count++;
+
+	return IRQ_HANDLED;
+}
+
+static void memcpy32_toio(void *dest, void *src, size_t size)
+{
+	u32 *dest_local = (u32 *)dest;
+	u32 *src_local = (u32 *)src;
+
+	WARN_ON(size & MSG_RAM_ALIGN_BYTES);
+	size /= sizeof(u32);
+	while (size--)
+		iowrite32(*src_local++, dest_local++);
+}
+
+static void memcpy32_fromio(void *dest, void *src, size_t size)
+{
+	u32 *dest_local = (u32 *)dest;
+	u32 *src_local = (u32 *)src;
+
+	WARN_ON(size & MSG_RAM_ALIGN_BYTES);
+	size /= sizeof(u32);
+	while (size--)
+		*dest_local++ = ioread32(src_local++);
+}
+
+/**
+ * set_ucore_link_ack() - set the link ack in the ucore channel desc.
+ * @mdev:	the mailbox for the field that is being set.
+ * @state:	the value to set the ack field to.
+ */
+static void set_ucore_link_ack(struct qmp_device *mdev, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, ucore);
+	offset += offsetof(struct channel_desc, link_state_ack);
+	iowrite32(state, mdev->desc + offset);
+}
+
+/**
+ * set_ucore_ch_ack() - set the channel ack in the ucore channel desc.
+ * @mdev:	the mailbox for the field that is being set.
+ * @state:	the value to set the ack field to.
+ */
+static void set_ucore_ch_ack(struct qmp_device *mdev, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, ucore);
+	offset += offsetof(struct channel_desc, ch_state_ack);
+	iowrite32(state, mdev->desc + offset);
+}
+
+/**
+ * set_mcore_ch() - set the channel state in the mcore channel desc.
+ * @mdev:	the mailbox for the field that is being set.
+ * @state:	the value to set the channel field to.
+ */
+static void set_mcore_ch(struct qmp_device *mdev, u32 state)
+{
+	u32 offset;
+
+	offset = offsetof(struct mbox_desc, mcore);
+	offset += offsetof(struct channel_desc, ch_state);
+	iowrite32(state, mdev->desc + offset);
+}
+
+/**
+ * qmp_notify_timeout() - Notify client of tx timeout with -EIO
+ * @work:	Structure for work that was scheduled.
+ */
+static void qmp_notify_timeout(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
+	struct mbox_chan *chan = &mdev->mbox->chans[0];
+	int err = -EIO;
+
+	pr_err("%s: qmp tx timeout for %s\n", __func__, mdev->name);
+	mbox_chan_txdone(chan, err);
+}
+
+/**
+ * qmp_startup() - Start qmp mailbox channel for communication. Waits for
+ *			remote subsystem to open channel if link is not
+ *			initiated or until timeout.
+ * @chan:	mailbox channel that is being opened.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qmp_startup(struct mbox_chan *chan)
+{
+	struct qmp_device *mdev = chan->con_priv;
+
+	if (!mdev)
+		return -EINVAL;
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->local_state == CHANNEL_CONNECTED) {
+		mutex_unlock(&mdev->state_lock);
+		return -EINVAL;
+	}
+	if (!completion_done(&mdev->link_complete)) {
+		mutex_unlock(&mdev->state_lock);
+		return -EAGAIN;
+	}
+
+	set_mcore_ch(mdev, QMP_MBOX_CH_CONNECTED);
+	mdev->local_state = LOCAL_CONNECTING;
+	mutex_unlock(&mdev->state_lock);
+
+	send_irq(mdev);
+	wait_for_completion_interruptible_timeout(&mdev->ch_complete,
+					msecs_to_jiffies(QMP_TOUT_MS));
+	return 0;
+}
+
+static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
+{
+	schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS));
+}
+
+/**
+ * qmp_send_data() - Copy the data to the channel's mailbox and notify
+ *				remote subsystem of new data. This function will
+ *				return an error if the previous message sent has
+ *				not been read. Cannot sleep.
+ * @chan:	mailbox channel that data is to be sent over.
+ * @data:	Data to be sent to remote processor, should be in the format of
+ *		a qmp_pkt.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qmp_send_data(struct mbox_chan *chan, void *data)
+{
+	struct qmp_device *mdev = chan->con_priv;
+	struct qmp_pkt *pkt = (struct qmp_pkt *)data;
+	void __iomem *addr;
+	unsigned long flags;
+
+	if (!mdev || !data || mdev->local_state != CHANNEL_CONNECTED)
+		return -EINVAL;
+
+	spin_lock_irqsave(&mdev->tx_lock, flags);
+	addr = mdev->msgram + mdev->mcore_mbox_offset;
+	if (ioread32(addr)) {
+		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+		return -EBUSY;
+	}
+
+	if (pkt->size + sizeof(pkt->size) > mdev->mcore_mbox_size) {
+		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+		return -EINVAL;
+	}
+	memcpy32_toio(addr + sizeof(pkt->size), pkt->data, pkt->size);
+	iowrite32(pkt->size, addr);
+	mdev->tx_sent = true;
+	send_irq(mdev);
+	qmp_schedule_tx_timeout(mdev);
+	spin_unlock_irqrestore(&mdev->tx_lock, flags);
+	return 0;
+}
+
+/**
+ * qmp_shutdown() - Disconnect this mailbox channel so the client does not
+ *				receive any more data and can relinquish
+ *				control of the channel.
+ * @chan:	mailbox channel to be shutdown.
+ */
+static void qmp_shutdown(struct mbox_chan *chan)
+{
+	struct qmp_device *mdev = chan->con_priv;
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->local_state != LINK_DISCONNECTED) {
+		mdev->local_state = LOCAL_DISCONNECTING;
+		set_mcore_ch(mdev, QMP_MBOX_CH_DISCONNECTED);
+		send_irq(mdev);
+	}
+	mdev->ch_in_use = false;
+	mutex_unlock(&mdev->state_lock);
+}
+
+/**
+ * qmp_last_tx_done() - qmp does not support polling operations; log the
+ *				unexpected usage and return true so that
+ *				operation can resume.
+ * @chan:	Corresponding mailbox channel for requested last tx.
+ *
+ * Return: true
+ */
+static bool qmp_last_tx_done(struct mbox_chan *chan)
+{
+	pr_err("In %s, unexpected usage of last_tx_done\n", __func__);
+	return true;
+}
+
+/**
+ * qmp_recv_data() - received notification that data is available in the
+ *			mailbox. Copy data from mailbox and pass to client.
+ * @mdev:	mailbox device that received the notification.
+ * @mbox_of:	offset of mailbox from msgram start.
+ */
+static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
+{
+	void __iomem *addr;
+	struct qmp_pkt *pkt;
+
+	addr = mdev->msgram + mbox_of;
+	pkt = &mdev->rx_pkt;
+	pkt->size = ioread32(addr);
+
+	if (pkt->size > mdev->mcore_mbox_size) {
+		pr_err("%s: Invalid mailbox packet\n", __func__);
+	} else {
+		memcpy32_fromio(pkt->data, addr + sizeof(pkt->size), pkt->size);
+		mbox_chan_received_data(&mdev->mbox->chans[0], pkt);
+	}
+	iowrite32(0, addr);
+	send_irq(mdev);
+}
+
+/**
+ * init_mcore_state() - initialize the mcore state of a mailbox.
+ * @mdev:	mailbox device to be initialized.
+ */
+static void init_mcore_state(struct qmp_device *mdev)
+{
+	struct channel_desc mcore;
+	u32 offset = offsetof(struct mbox_desc, mcore);
+
+	mcore.link_state = QMP_MBOX_LINK_UP;
+	mcore.link_state_ack = QMP_MBOX_LINK_DOWN;
+	mcore.ch_state = QMP_MBOX_CH_DISCONNECTED;
+	mcore.ch_state_ack = QMP_MBOX_CH_DISCONNECTED;
+	mcore.mailbox_size = mdev->mcore_mbox_size;
+	mcore.mailbox_offset = mdev->mcore_mbox_offset;
+	memcpy32_toio(mdev->desc + offset, &mcore, sizeof(mcore));
+}
+
+/**
+ * __qmp_rx_worker() - Handle incoming messages from remote processor.
+ * @mdev:	mailbox device that received notification.
+ */
+static void __qmp_rx_worker(struct qmp_device *mdev)
+{
+	u32 msg_len;
+	struct mbox_desc desc;
+
+	memcpy_fromio(&desc, mdev->desc, sizeof(desc));
+	if (desc.magic != QMP_MAGIC)
+		return;
+
+	mutex_lock(&mdev->state_lock);
+	switch (mdev->local_state) {
+	case LINK_DISCONNECTED:
+		mdev->version.version = desc.version;
+		mdev->version.features = desc.features;
+		set_ucore_link_ack(mdev, desc.ucore.link_state);
+		if (desc.mcore.mailbox_size) {
+			mdev->mcore_mbox_size = desc.mcore.mailbox_size;
+			mdev->mcore_mbox_offset = desc.mcore.mailbox_offset;
+		}
+		init_mcore_state(mdev);
+		mdev->local_state = LINK_NEGOTIATION;
+		mdev->rx_pkt.data = devm_kzalloc(mdev->dev,
+						 desc.ucore.mailbox_size,
+						 GFP_KERNEL);
+		if (!mdev->rx_pkt.data) {
+			pr_err("In %s: failed to allocate rx pkt\n", __func__);
+			break;
+		}
+		send_irq(mdev);
+		break;
+	case LINK_NEGOTIATION:
+		if (desc.mcore.link_state_ack != QMP_MBOX_LINK_UP ||
+				desc.mcore.link_state != QMP_MBOX_LINK_UP) {
+			pr_err("In %s: rx interrupt without negotiation ack\n",
+					__func__);
+			break;
+		}
+		mdev->local_state = LINK_CONNECTED;
+		complete_all(&mdev->link_complete);
+		break;
+	case LINK_CONNECTED:
+		if (desc.ucore.ch_state == desc.ucore.ch_state_ack) {
+			pr_err("In %s: rx interrupt without channel open\n",
+					__func__);
+			break;
+		}
+		set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+		send_irq(mdev);
+		break;
+	case LOCAL_CONNECTING:
+		if (desc.mcore.ch_state_ack == QMP_MBOX_CH_CONNECTED &&
+				desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED)
+			mdev->local_state = LOCAL_CONNECTED;
+
+		if (desc.ucore.ch_state != desc.ucore.ch_state_ack) {
+			set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+			send_irq(mdev);
+		}
+		if (mdev->local_state == LOCAL_CONNECTED &&
+				desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED &&
+				desc.ucore.ch_state == QMP_MBOX_CH_CONNECTED) {
+			mdev->local_state = CHANNEL_CONNECTED;
+			complete_all(&mdev->ch_complete);
+		}
+		break;
+	case LOCAL_CONNECTED:
+		if (desc.ucore.ch_state == desc.ucore.ch_state_ack) {
+			pr_err("In %s: rx interrupt without remote channel open\n",
+					__func__);
+			break;
+		}
+		set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+		mdev->local_state = CHANNEL_CONNECTED;
+		send_irq(mdev);
+		complete_all(&mdev->ch_complete);
+		break;
+	case CHANNEL_CONNECTED:
+		if (desc.ucore.ch_state == QMP_MBOX_CH_DISCONNECTED) {
+			set_ucore_ch_ack(mdev, desc.ucore.ch_state);
+			mdev->local_state = LOCAL_CONNECTED;
+			send_irq(mdev);
+		}
+
+		msg_len = ioread32(mdev->msgram + desc.ucore.mailbox_offset);
+		if (msg_len)
+			qmp_recv_data(mdev, desc.ucore.mailbox_offset);
+
+		if (mdev->tx_sent) {
+			msg_len = ioread32(mdev->msgram +
+						mdev->mcore_mbox_offset);
+			if (msg_len == 0) {
+				mdev->tx_sent = false;
+				cancel_delayed_work(&mdev->dwork);
+				mbox_chan_txdone(&mdev->mbox->chans[0], 0);
+			}
+		}
+		break;
+	case LOCAL_DISCONNECTING:
+		if (desc.mcore.ch_state_ack == QMP_MBOX_CH_DISCONNECTED &&
+				desc.mcore.ch_state == desc.mcore.ch_state_ack)
+			mdev->local_state = LINK_CONNECTED;
+		reinit_completion(&mdev->ch_complete);
+		break;
+	default:
+		pr_err("In %s: Local Channel State corrupted\n", __func__);
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void rx_worker(struct kthread_work *work)
+{
+	struct qmp_device *mdev;
+
+	mdev = container_of(work, struct qmp_device, kwork);
+	__qmp_rx_worker(mdev);
+}
+
+/**
+ * qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
+ *			device. Make sure the channel is not already in use.
+ * @mbox:	Mailbox device that controls the requested channel.
+ * @spec:	Device tree arguments to specify which channel is requested.
+ */
+static struct mbox_chan *qmp_mbox_of_xlate(struct mbox_controller *mbox,
+		const struct of_phandle_args *spec)
+{
+	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
+	unsigned int channel = spec->args[0];
+
+	if (!mdev || channel >= mbox->num_chans)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->ch_in_use) {
+		pr_err("%s, mbox channel already in use %s\n", __func__,
+								mdev->name);
+		mutex_unlock(&mdev->state_lock);
+		return ERR_PTR(-EBUSY);
+	}
+	mdev->ch_in_use = true;
+	mutex_unlock(&mdev->state_lock);
+	return &mbox->chans[0];
+}
+
+/**
+ * parse_devicetree() - Parse the device tree information for QMP, map io
+ *			memory and register for needed interrupts
+ * @pdev:	platform device for this driver.
+ * @mdev:	mailbox device to hold the device tree configuration.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qmp_parse_devicetree(struct platform_device *pdev,
+					struct qmp_device *mdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	char *key;
+	int rc;
+	const char *subsys_name;
+	u32 rx_irq_line, tx_irq_mask;
+	u32 desc_of = 0;
+	u32 mbox_of = 0;
+	u32 mbox_size = 0;
+	struct resource *msgram_r, *tx_irq_reg_r;
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "msgram";
+	msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!msgram_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "irq-reg-base";
+	tx_irq_reg_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!tx_irq_reg_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "qcom,irq-mask";
+	rc = of_property_read_u32(node, key, &tx_irq_mask);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "interrupts";
+	rx_irq_line = irq_of_parse_and_map(node, 0);
+	if (!rx_irq_line) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "mbox-desc-offset";
+	rc = of_property_read_u32(node, key, &desc_of);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "mbox-offset";
+	rc = of_property_read_u32(node, key, &mbox_of);
+	if (!rc)
+		mdev->mcore_mbox_offset = mbox_of;
+
+	key = "mbox-size";
+	rc = of_property_read_u32(node, key, &mbox_size);
+	if (!rc)
+		mdev->mcore_mbox_size = mbox_size;
+
+	mdev->name = subsys_name;
+	mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start,
+						resource_size(msgram_r));
+	if (!mdev->msgram)
+		return -ENOMEM;
+
+	mdev->desc = mdev->msgram + desc_of;
+	if (!mdev->desc)
+		return -ENOMEM;
+
+	mdev->irq_mask = tx_irq_mask;
+	mdev->tx_irq_reg = devm_ioremap_nocache(&pdev->dev, tx_irq_reg_r->start,
+						resource_size(tx_irq_reg_r));
+	if (!mdev->tx_irq_reg)
+		return -ENOMEM;
+
+	mdev->rx_irq_line = rx_irq_line;
+	return 0;
+}
+
+/**
+ * cleanup_workqueue() - Flush all work and stop the thread for this mailbox.
+ * @mdev:	mailbox device to cleanup.
+ */
+static void cleanup_workqueue(struct qmp_device *mdev)
+{
+	kthread_flush_worker(&mdev->kworker);
+	kthread_stop(mdev->task);
+	mdev->task = NULL;
+}
+
+static struct mbox_chan_ops qmp_mbox_ops = {
+	.startup = qmp_startup,
+	.shutdown = qmp_shutdown,
+	.send_data = qmp_send_data,
+	.last_tx_done = qmp_last_tx_done,
+};
+
+static const struct of_device_id qmp_mbox_match_table[] = {
+	{ .compatible = "qcom,qmp-mbox" },
+	{},
+};
+
+static int qmp_mbox_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct mbox_controller *mbox;
+	struct qmp_device *mdev;
+	struct mbox_chan *chans;
+	int ret = 0;
+
+	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, mdev);
+
+	ret = qmp_parse_devicetree(pdev, mdev);
+	if (ret)
+		return ret;
+
+	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	chans = devm_kzalloc(&pdev->dev, sizeof(*chans) * QMP_NUM_CHANS,
+								GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+
+	mbox->dev = &pdev->dev;
+	mbox->ops = &qmp_mbox_ops;
+	mbox->chans = chans;
+	mbox->chans[0].con_priv = mdev;
+	mbox->num_chans = QMP_NUM_CHANS;
+	mbox->txdone_irq = true;
+	mbox->txdone_poll = false;
+	mbox->of_xlate = qmp_mbox_of_xlate;
+
+	mdev->dev = &pdev->dev;
+	mdev->mbox = mbox;
+	spin_lock_init(&mdev->tx_lock);
+	mutex_init(&mdev->state_lock);
+	mdev->local_state = LINK_DISCONNECTED;
+	kthread_init_work(&mdev->kwork, rx_worker);
+	kthread_init_worker(&mdev->kworker);
+	mdev->task = kthread_run(kthread_worker_fn, &mdev->kworker, "qmp_%s",
+								mdev->name);
+	init_completion(&mdev->link_complete);
+	init_completion(&mdev->ch_complete);
+	mdev->tx_sent = false;
+	mdev->ch_in_use = false;
+	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
+
+	ret = mbox_controller_register(mbox);
+	if (ret) {
+		cleanup_workqueue(mdev);
+		pr_err("%s: failed to register mbox controller %d\n", __func__,
+									ret);
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, mdev->rx_irq_line, qmp_irq_handler,
+		IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+		node->name, mdev);
+	if (ret < 0) {
+		cleanup_workqueue(mdev);
+		mbox_controller_unregister(mdev->mbox);
+		pr_err("%s: request irq on %d failed: %d\n", __func__,
+							mdev->rx_irq_line, ret);
+		return ret;
+	}
+	ret = enable_irq_wake(mdev->rx_irq_line);
+	if (ret < 0)
+		pr_err("%s: enable_irq_wake on %d failed: %d\n", __func__,
+							mdev->rx_irq_line, ret);
+
+	qmp_irq_handler(0, mdev);
+	return 0;
+}
+
+static int qmp_mbox_remove(struct platform_device *pdev)
+{
+	struct qmp_device *mdev = platform_get_drvdata(pdev);
+
+	cleanup_workqueue(mdev);
+	mbox_controller_unregister(mdev->mbox);
+	return 0;
+}
+
+static struct platform_driver qmp_mbox_driver = {
+	.probe = qmp_mbox_probe,
+	.remove = qmp_mbox_remove,
+	.driver = {
+		.name = "qmp_mbox",
+		.owner = THIS_MODULE,
+		.of_match_table = qmp_mbox_match_table,
+	},
+};
+
+static int __init qmp_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&qmp_mbox_driver);
+	if (rc)
+		pr_err("%s: qmp_mbox_driver reg failed %d\n", __func__, rc);
+	return rc;
+}
+arch_initcall(qmp_init);
+
+MODULE_DESCRIPTION("MSM QTI Mailbox Protocol");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index 5b114cb..31119ea 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -121,6 +121,7 @@
 
 /* One per MBOX controller */
 struct tcs_drv {
+	const char *name;
 	void *base; /* start address of the RSC's registers */
 	void *reg_base; /* start address for DRV specific register */
 	int drv_id;
@@ -385,7 +386,8 @@
 			mbox_chan_received_data(resp->chan, resp->msg);
 		}
 
-		trace_rpmh_notify_irq(m, resp->msg->payload[0].addr, resp->err);
+		trace_rpmh_notify_irq(drv->name, m, resp->msg->payload[0].addr,
+						resp->err);
 
 		/* Notify the client that this request is completed. */
 		send_tcs_response(resp);
@@ -401,7 +403,9 @@
 static inline void mbox_notify_tx_done(struct mbox_chan *chan,
 				struct tcs_mbox_msg *msg, int m, int err)
 {
-	trace_rpmh_notify(m, msg->payload[0].addr, err);
+	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+
+	trace_rpmh_notify(drv->name, m, msg->payload[0].addr, err);
 	mbox_chan_txdone(chan, err);
 }
 
@@ -467,7 +471,7 @@
 	mbox_notify_tx_done(chan, msg, -1, err);
 }
 
-static void __tcs_buffer_write(void __iomem *base, int d, int m, int n,
+static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
 			struct tcs_mbox_msg *msg, bool trigger)
 {
 	u32 cmd_msgid = 0;
@@ -476,6 +480,7 @@
 	u32 enable = TCS_AMC_MODE_ENABLE;
 	struct tcs_cmd *cmd;
 	int i;
+	void __iomem *base = drv->reg_base;
 
 	/* We have homologous command set i.e pure read or write, not a mix */
 	cmd_msgid = CMD_MSGID_LEN;
@@ -492,8 +497,8 @@
 		write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, cmd_msgid);
 		write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
 		write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
-		trace_rpmh_send_msg(base, m, n + i,
-				cmd_msgid, cmd->addr, cmd->data, cmd->complete);
+		trace_rpmh_send_msg(drv->name, m, n + i, cmd_msgid, cmd->addr,
+					cmd->data, cmd->complete, trigger);
 	}
 
 	/* Write the send-after-prev completion bits for the batch */
@@ -716,7 +721,7 @@
 	}
 
 	/* Write to the TCS or AMC */
-	__tcs_buffer_write(drv->reg_base, d, m, n, msg, trigger);
+	__tcs_buffer_write(drv, d, m, n, msg, trigger);
 
 	/* Schedule a timeout response, incase there is no actual response */
 	if (trigger)
@@ -826,15 +831,16 @@
 	return 0;
 }
 
-static void __tcs_write_hidden(void *base, int d, struct tcs_mbox_msg *msg)
+static void __tcs_write_hidden(struct tcs_drv *drv, int d,
+					struct tcs_mbox_msg *msg)
 {
 	int i;
-	void __iomem *addr = base + TCS_HIDDEN_CMD0_DRV_DATA;
+	void __iomem *addr = drv->base + TCS_HIDDEN_CMD0_DRV_DATA;
 
 	for (i = 0; i < msg->num_payload; i++) {
 		/* Only data is write capable */
 		writel_relaxed(cpu_to_le32(msg->payload[i].data), addr);
-		trace_rpmh_control_msg(addr, msg->payload[i].data);
+		trace_rpmh_control_msg(drv->name, msg->payload[i].data);
 		addr += TCS_HIDDEN_CMD_SHIFT;
 	}
 }
@@ -855,7 +861,7 @@
 	}
 
 	spin_lock(&tcs->tcs_lock);
-	__tcs_write_hidden(tcs->drv->base, drv->drv_id, msg);
+	__tcs_write_hidden(tcs->drv, drv->drv_id, msg);
 	spin_unlock(&tcs->tcs_lock);
 
 	return 0;
@@ -1073,6 +1079,10 @@
 	drv->num_tcs = st;
 	drv->pdev = pdev;
 
+	drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!drv->name)
+		drv->name = dev_name(&pdev->dev);
+
 	ret = tcs_response_pool_init(drv);
 	if (ret)
 		return ret;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda..55b5e0e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1470,7 +1470,25 @@
 			split = bio;
 		}
 
+		/*
+		 * If a bio is split, the first part of the bio will pass the
+		 * barrier but the bio is queued in current->bio_list (see
+		 * generic_make_request). If raise_barrier() is called here,
+		 * the second part of the bio can't pass the barrier. But since
+		 * the first part of the bio isn't dispatched to the underlying
+		 * disks yet, the barrier is never released, so raise_barrier()
+		 * waits forever. We have a deadlock.
+		 * Note, this only happens in the read path. For the write
+		 * path, the first part of the bio is dispatched in a
+		 * schedule() call (because of blk plug) or offloaded to
+		 * raid10d.
+		 * Quitting the function immediately can change the order
+		 * bios are queued in bio_list and avoid the deadlock.
+		 */
 		__make_request(mddev, split);
+		if (split != bio && bio_data_dir(bio) == READ) {
+			generic_make_request(bio);
+			break;
+		}
 	} while (split != bio);
 
 	/* In case raid10d snuck in to freeze_array */
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index faba819..db01353 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -2,4 +2,4 @@
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
-
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
diff --git a/drivers/media/platform/msm/camera/cam_core/Makefile b/drivers/media/platform/msm/camera/cam_core/Makefile
new file mode 100644
index 0000000..417de13
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_node.o cam_subdev.o
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
new file mode 100644
index 0000000..56b34f5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "cam_context.h"
+
+static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_context *ctx = (struct cam_context *)context;
+
+	if (!ctx || !ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ctx->state_machine[ctx->state].irq_ops)
+		rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id,
+			evt_data);
+	else
+		pr_debug("%s: No function to handle event %d in dev %d, state %d\n",
+				__func__, evt_id, ctx->dev_hdl, ctx->state);
+	return rc;
+}
+
+int cam_context_handle_get_dev_info(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *info)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!info) {
+		pr_err("%s: Invalid get device info payload.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.get_dev_info) {
+		rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
+			ctx, info);
+	} else {
+		pr_err("%s: No get device info in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_link(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!link) {
+		pr_err("%s: Invalid link payload.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.link) {
+		rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
+	} else {
+		pr_err("%s: No crm link in dev %d, state %d\n", __func__,
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_unlink(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready!\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!unlink) {
+		pr_err("%s: Invalid unlink payload.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.unlink) {
+		rc = ctx->state_machine[ctx->state].crm_ops.unlink(
+			ctx, unlink);
+	} else {
+		pr_err("%s: No crm unlink in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!apply) {
+		pr_err("%s: Invalid apply request payload.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.apply_req) {
+		rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
+			apply);
+	} else {
+		pr_err("%s: No crm apply req in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid acquire device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
+			ctx, cmd);
+	} else {
+		pr_err("%s: No acquire device in dev %d, state %d\n",
+			__func__, cmd->dev_handle, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_release_dev(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid release device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.release_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
+			ctx, cmd);
+	} else {
+		pr_err("%s: No release device in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_config_dev(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid config device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.config_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
+			ctx, cmd);
+	} else {
+		pr_err("%s: No config device in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_start_dev(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid start device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.start_dev)
+		rc = ctx->state_machine[ctx->state].ioctl_ops.start_dev(
+			ctx, cmd);
+	else
+		/* start device can be optional for some drivers */
+		pr_debug("%s: No start device in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		pr_err("%s: Context is not ready.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		pr_err("%s: Invalid stop device command payload.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev)
+		rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
+			ctx, cmd);
+	else
+		/* stop device can be optional for some drivers */
+		pr_warn("%s: No stop device in dev %d, state %d\n",
+			__func__, ctx->dev_hdl, ctx->state);
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_init(struct cam_context *ctx,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_ctx_request *req_list,
+	uint32_t req_size)
+{
+	int i;
+
+	/* crm_node_intf is optional */
+	if (!ctx || !hw_mgr_intf || !req_list) {
+		pr_err("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	INIT_LIST_HEAD(&ctx->list);
+	mutex_init(&ctx->ctx_mutex);
+	spin_lock_init(&ctx->lock);
+
+	ctx->ctx_crm_intf = NULL;
+	ctx->crm_ctx_intf = crm_node_intf;
+	ctx->hw_mgr_intf = hw_mgr_intf;
+	ctx->irq_cb_intf = cam_context_handle_hw_event;
+
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	INIT_LIST_HEAD(&ctx->wait_req_list);
+	INIT_LIST_HEAD(&ctx->pending_req_list);
+	INIT_LIST_HEAD(&ctx->free_req_list);
+	ctx->req_list = req_list;
+	ctx->req_size = req_size;
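+	/* Every request starts out on the free list */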
+	for (i = 0; i < req_size; i++) {
+		INIT_LIST_HEAD(&ctx->req_list[i].list);
+		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+	}
+	ctx->state = CAM_CTX_AVAILABLE;
+	ctx->state_machine = NULL;
+	ctx->ctx_priv = NULL;
+
+	return 0;
+}
+
+int cam_context_deinit(struct cam_context *ctx)
+{
+	if (!ctx)
+		return -EINVAL;
+
+	/*
+	 * This is called from platform device remove.
+	 * Everything should have been released by this point,
+	 * so we just clear the context memory.
+	 */
+	if (ctx->state != CAM_CTX_AVAILABLE)
+		pr_err("%s: Device did not shutdown cleanly.\n", __func__);
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
new file mode 100644
index 0000000..c7329cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -0,0 +1,302 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_CONTEXT_H_
+#define _CAM_CONTEXT_H_
+
+#include <linux/spinlock.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_hw_mgr_intf.h"
+
+/* Forward declarations */
+struct cam_context;
+
+/* max request number */
+#define CAM_CTX_REQ_MAX              20
+
+/**
+ * enum cam_context_state - context top level states
+ *
+ */
+enum cam_context_state {
+	CAM_CTX_UNINIT               = 0,
+	CAM_CTX_AVAILABLE            = 1,
+	CAM_CTX_ACQUIRED             = 2,
+	CAM_CTX_READY                = 3,
+	CAM_CTX_ACTIVATED            = 4,
+	CAM_CTX_STATE_MAX            = 5,
+};
+
+/**
+ * struct cam_ctx_request - Common request structure for the context
+ *
+ * @list:                  Link list entry
+ * @status:                Request status
+ * @request_id:            Request id
+ * @req_priv:              Derived request object
+ *
+ */
+struct cam_ctx_request {
+	struct list_head   list;
+	uint32_t           status;
+	uint64_t           request_id;
+	void              *req_priv;
+};
+
+/**
+ * struct cam_ctx_ioctl_ops - Function table for handling IOCTL calls
+ *
+ * @acquire_dev:           Function pointer for acquire device
+ * @release_dev:           Function pointer for release device
+ * @config_dev:            Function pointer for config device
+ * @start_dev:             Function pointer for start device
+ * @stop_dev:              Function pointer for stop device
+ *
+ */
+struct cam_ctx_ioctl_ops {
+	int (*acquire_dev)(struct cam_context *ctx,
+			struct cam_acquire_dev_cmd *cmd);
+	int (*release_dev)(struct cam_context *ctx,
+			struct cam_release_dev_cmd *cmd);
+	int (*config_dev)(struct cam_context *ctx,
+			struct cam_config_dev_cmd *cmd);
+	int (*start_dev)(struct cam_context *ctx,
+			struct cam_start_stop_dev_cmd *cmd);
+	int (*stop_dev)(struct cam_context *ctx,
+			struct cam_start_stop_dev_cmd *cmd);
+};
+
+/**
+ * struct cam_ctx_crm_ops -  Function table for handling CRM to context calls
+ *
+ * @get_dev_info:          Get device information
+ * @link:                  Link the context
+ * @unlink:                Unlink the context
+ * @apply_req:             Apply setting for the context
+ *
+ */
+struct cam_ctx_crm_ops {
+	int (*get_dev_info)(struct cam_context *ctx,
+			struct cam_req_mgr_device_info *);
+	int (*link)(struct cam_context *ctx,
+			struct cam_req_mgr_core_dev_link_setup *link);
+	int (*unlink)(struct cam_context *ctx,
+			struct cam_req_mgr_core_dev_link_setup *unlink);
+	int (*apply_req)(struct cam_context *ctx,
+			struct cam_req_mgr_apply_request *apply);
+};
+
+
+/**
+ * struct cam_ctx_ops - Collection of the interface function tables
+ *
+ * @ioctl_ops:             Ioctl function table
+ * @crm_ops:               CRM to context interface function table
+ * @irq_ops:               Hardware event handle function
+ *
+ */
+struct cam_ctx_ops {
+	struct cam_ctx_ioctl_ops     ioctl_ops;
+	struct cam_ctx_crm_ops       crm_ops;
+	cam_hw_event_cb_func         irq_ops;
+};
+
+/**
+ * struct cam_context - camera context object for the subdevice node
+ *
+ * @list:                  Link list entry
+ * @session_hdl:           Session handle
+ * @dev_hdl:               Device handle
+ * @link_hdl:              Link handle
+ * @ctx_mutex:             Mutex for ioctl calls
+ * @lock:                  Spin lock
+ * @active_req_list:       Requests pending for done event
+ * @pending_req_list:      Requests pending for reg upd event
+ * @wait_req_list:         Requests waiting for apply
+ * @free_req_list:         Requests that are free
+ * @req_list:              Reference to the request storage
+ * @req_size:              Size of the request storage
+ * @hw_mgr_intf:           Context to HW interface
+ * @ctx_crm_intf:          Context to CRM interface
+ * @crm_ctx_intf:          CRM to context interface
+ * @irq_cb_intf:           HW to context callback interface
+ * @state:                 Current state for top level state machine
+ * @state_machine:         Top level state machine
+ * @ctx_priv:              Private context pointer
+ *
+ */
+struct cam_context {
+	struct list_head             list;
+	int32_t                      session_hdl;
+	int32_t                      dev_hdl;
+	int32_t                      link_hdl;
+
+	struct mutex                 ctx_mutex;
+	spinlock_t                   lock;
+
+	struct list_head             active_req_list;
+	struct list_head             pending_req_list;
+	struct list_head             wait_req_list;
+	struct list_head             free_req_list;
+	struct cam_ctx_request      *req_list;
+	uint32_t                     req_size;
+
+	struct cam_hw_mgr_intf      *hw_mgr_intf;
+	struct cam_req_mgr_crm_cb   *ctx_crm_intf;
+	struct cam_req_mgr_kmd_ops  *crm_ctx_intf;
+	cam_hw_event_cb_func         irq_cb_intf;
+
+	enum cam_context_state       state;
+	struct cam_ctx_ops          *state_machine;
+
+	void                        *ctx_priv;
+};
+
+/**
+ * cam_context_handle_get_dev_info()
+ *
+ * @brief:        Handle get device information command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @info:                  Device information returned
+ *
+ */
+int cam_context_handle_get_dev_info(struct cam_context *ctx,
+		struct cam_req_mgr_device_info *info);
+
+/**
+ * cam_context_handle_link()
+ *
+ * @brief:        Handle link command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @link:                  Link command payload
+ *
+ */
+int cam_context_handle_link(struct cam_context *ctx,
+		struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * cam_context_handle_unlink()
+ *
+ * @brief:        Handle unlink command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @unlink:                Unlink command payload
+ *
+ */
+int cam_context_handle_unlink(struct cam_context *ctx,
+		struct cam_req_mgr_core_dev_link_setup *unlink);
+
+/**
+ * cam_context_handle_apply_req()
+ *
+ * @brief:        Handle apply request command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @apply:                 Apply request command payload
+ *
+ */
+int cam_context_handle_apply_req(struct cam_context *ctx,
+		struct cam_req_mgr_apply_request *apply);
+
+
+/**
+ * cam_context_handle_acquire_dev()
+ *
+ * @brief:        Handle acquire device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Acquire device command payload
+ *
+ */
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+		struct cam_acquire_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_release_dev()
+ *
+ * @brief:        Handle release device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Release device command payload
+ *
+ */
+int cam_context_handle_release_dev(struct cam_context *ctx,
+		struct cam_release_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_config_dev()
+ *
+ * @brief:        Handle config device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Config device command payload
+ *
+ */
+int cam_context_handle_config_dev(struct cam_context *ctx,
+		struct cam_config_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_start_dev()
+ *
+ * @brief:        Handle start device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Start device command payload
+ *
+ */
+int cam_context_handle_start_dev(struct cam_context *ctx,
+		struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_stop_dev()
+ *
+ * @brief:        Handle stop device command
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @cmd:                   Stop device command payload
+ *
+ */
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+		struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_deinit()
+ *
+ * @brief:        Camera context deinitialize function
+ *
+ * @ctx:                   Object pointer for cam_context
+ *
+ */
+int cam_context_deinit(struct cam_context *ctx);
+
+/**
+ * cam_context_init()
+ *
+ * @brief:        Camera context initialize function
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @crm_node_intf:         Function table for crm to context interface
+ * @hw_mgr_intf:           Function table for context to hw interface
+ * @req_list:              Requests storage
+ * @req_size:              Size of the request storage
+ *
+ */
+int cam_context_init(struct cam_context *ctx,
+		struct cam_req_mgr_kmd_ops *crm_node_intf,
+		struct cam_hw_mgr_intf *hw_mgr_intf,
+		struct cam_ctx_request *req_list,
+		uint32_t req_size);
+
+
+#endif  /* _CAM_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw.h b/drivers/media/platform/msm/camera/cam_core/cam_hw.h
new file mode 100644
index 0000000..d01a84a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_H_
+#define _CAM_HW_H_
+
+#include "cam_soc_util.h"
+
+/*
+ * This file declares Enums, Structures and APIs to be used as template
+ * when writing any HW driver in the camera subsystem.
+ */
+
+/* Hardware state enum */
+enum cam_hw_state {
+	CAM_HW_STATE_POWER_DOWN,
+	CAM_HW_STATE_POWER_UP,
+};
+
+/**
+ * struct cam_hw_info - Common hardware information
+ *
+ * @hw_mutex:              Hardware mutex
+ * @hw_lock:               Hardware spinlock
+ * @hw_complete:           Hardware Completion
+ * @open_count:            Count of HW enable requests from the client
+ * @hw_state:              Hardware state
+ * @soc_info:              Platform SOC properties for hardware
+ * @node_info:             Private HW data related to nodes
+ * @core_info:             Private HW data related to core logic
+ *
+ */
+struct cam_hw_info {
+	struct mutex                    hw_mutex;
+	spinlock_t                      hw_lock;
+	struct completion               hw_complete;
+	uint32_t                        open_count;
+	enum cam_hw_state               hw_state;
+	struct cam_hw_soc_info          soc_info;
+	void                           *node_info;
+	void                           *core_info;
+};
+
+#endif /* _CAM_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
new file mode 100644
index 0000000..3a997ae
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_INTF_H_
+#define _CAM_HW_INTF_H_
+
+#include <linux/types.h>
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW driver and HW Manager.
+ */
+
+/**
+ * struct cam_hw_ops - Hardware layer interface functions
+ *
+ * @get_hw_caps:           Function pointer for get hw caps
+ * @init:                  Function pointer for initialize hardware
+ * @deinit:                Function pointer for deinitialize hardware
+ * @reset:                 Function pointer for reset hardware
+ * @reserve:               Function pointer for reserve hardware
+ * @release:               Function pointer for release hardware
+ * @start:                 Function pointer for start hardware
+ * @stop:                  Function pointer for stop hardware
+ * @read:                  Function pointer for read hardware registers
+ * @write:                 Function pointer for write hardware registers
+ * @process_cmd:           Function pointer for additional hardware controls
+ *
+ */
+struct cam_hw_ops {
+	int (*get_hw_caps)(void *hw_priv,
+		void *get_hw_cap_args, uint32_t arg_size);
+	int (*init)(void *hw_priv,
+		void *init_hw_args, uint32_t arg_size);
+	int (*deinit)(void *hw_priv,
+		void *init_hw_args, uint32_t arg_size);
+	int (*reset)(void *hw_priv,
+		void *reset_core_args, uint32_t arg_size);
+	int (*reserve)(void *hw_priv,
+		void *reserve_args, uint32_t arg_size);
+	int (*release)(void *hw_priv,
+		void *release_args, uint32_t arg_size);
+	int (*start)(void *hw_priv,
+		void *start_args, uint32_t arg_size);
+	int (*stop)(void *hw_priv,
+		void *stop_args, uint32_t arg_size);
+	int (*read)(void *hw_priv,
+		void *read_args, uint32_t arg_size);
+	int (*write)(void *hw_priv,
+		void *write_args, uint32_t arg_size);
+	int (*process_cmd)(void *hw_priv,
+		uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+};
+
+/**
+ * struct cam_hw_intf - Common hardware node
+ *
+ * @hw_type:               Hardware type
+ * @hw_idx:                Hardware ID
+ * @hw_ops:                Hardware interface function table
+ * @hw_priv:               Private hardware node pointer
+ *
+ */
+struct cam_hw_intf {
+	uint32_t                     hw_type;
+	uint32_t                     hw_idx;
+	struct cam_hw_ops            hw_ops;
+	void                        *hw_priv;
+};
+
+#endif /* _CAM_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
new file mode 100644
index 0000000..db605e7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_HW_MGR_INTF_H_
+#define _CAM_HW_MGR_INTF_H_
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW Manager and Context.
+ */
+
+
+/* maximum context numbers */
+#define CAM_CTX_MAX                         8
+
+/* maximum buf done irqs */
+#define CAM_NUM_OUT_PER_COMP_IRQ_MAX        12
+
+/* hardware event callback function type */
+typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id,
+	void *evt_data);
+
+/**
+ * struct cam_hw_update_entry - Entry for hardware config
+ *
+ * @handle:                Memory handle for the configuration
+ * @offset:                Memory offset
+ * @len:                   Size of the configuration
+ * @flags:                 Flags for the config entry (e.g. DMI)
+ *
+ */
+struct cam_hw_update_entry {
+	int                handle;
+	uint32_t           offset;
+	uint32_t           len;
+	uint32_t           flags;
+};
+
+/**
+ * struct cam_hw_fence_map_entry - Entry for the resource to sync id map
+ *
+ * @resource_handle:       Resource port id for the buffer
+ * @sync_id:               Sync id
+ *
+ */
+struct cam_hw_fence_map_entry {
+	uint32_t           resource_handle;
+	int32_t            sync_id;
+};
+
+/**
+ * struct cam_hw_done_event_data - Payload for hw done event
+ *
+ * @num_handles:           Number of handles in the event
+ * @resource_handle:       List of resource handles
+ * @timestamp:             Time stamp
+ *
+ */
+struct cam_hw_done_event_data {
+	uint32_t           num_handles;
+	uint32_t           resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	struct timeval     timestamp;
+};
+
+/**
+ * struct cam_hw_acquire_args - Payload for acquire command
+ *
+ * @context_data:          Context data pointer for the callback function
+ * @event_cb:              Callback function array
+ * @num_acq:               Total number of acquire in the payload
+ * @acquire_info:          Acquired resource array pointer
+ * @ctxt_to_hw_map:        HW context (returned)
+ *
+ */
+struct cam_hw_acquire_args {
+	void                        *context_data;
+	cam_hw_event_cb_func         event_cb;
+	uint32_t                     num_acq;
+	uint64_t                     acquire_info;
+	void                        *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_hw_release_args - Payload for release command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ *
+ */
+struct cam_hw_release_args {
+	void              *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_hw_start_args - Payload for start command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_hw_update_entries: Number of Hardware configuration
+ * @hw_update_entries:     Hardware configuration list
+ *
+ */
+struct cam_hw_start_args {
+	void                        *ctxt_to_hw_map;
+	uint32_t                     num_hw_update_entries;
+	struct cam_hw_update_entry  *hw_update_entries;
+};
+
+/**
+ * struct cam_hw_stop_args - Payload for stop command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ *
+ */
+struct cam_hw_stop_args {
+	void              *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_hw_prepare_update_args - Payload for prepare command
+ *
+ * @packet:                CSL packet from user mode driver
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @max_hw_update_entries: Maximum hardware update entries supported
+ * @hw_update_entries:     Actual hardware update configuration (returned)
+ * @num_hw_update_entries: Number of actual hardware update entries (returned)
+ * @max_out_map_entries:   Maximum output fence mapping supported
+ * @out_map_entries:       Actual output fence mapping list (returned)
+ * @num_out_map_entries:   Number of actual output fence mapping (returned)
+ * @max_in_map_entries:    Maximum input fence mapping supported
+ * @in_map_entries:        Actual input fence mapping list (returned)
+ * @num_in_map_entries:    Number of actual input fence mapping (returned)
+ *
+ */
+struct cam_hw_prepare_update_args {
+	struct cam_packet              *packet;
+	void                           *ctxt_to_hw_map;
+	uint32_t                        max_hw_update_entries;
+	struct cam_hw_update_entry     *hw_update_entries;
+	uint32_t                        num_hw_update_entries;
+	uint32_t                        max_out_map_entries;
+	struct cam_hw_fence_map_entry  *out_map_entries;
+	uint32_t                        num_out_map_entries;
+	uint32_t                        max_in_map_entries;
+	struct cam_hw_fence_map_entry  *in_map_entries;
+	uint32_t                        num_in_map_entries;
+};
+
+/**
+ * struct cam_hw_config_args - Payload for config command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_hw_update_entries: Number of hardware update entries
+ * @hw_update_entries:     Hardware update list
+ *
+ */
+struct cam_hw_config_args {
+	void                        *ctxt_to_hw_map;
+	uint32_t                     num_hw_update_entries;
+	struct cam_hw_update_entry  *hw_update_entries;
+};
+
+/**
+ * cam_hw_mgr_intf - HW manager interface
+ *
+ * @hw_mgr_priv:           HW manager object
+ * @hw_get_caps:           Function pointer for get hw caps
+ *                               args = cam_query_cap_cmd
+ * @hw_acquire:            Function pointer for acquire hw resources
+ *                               args = cam_hw_acquire_args
+ * @hw_release:            Function pointer for release hw device resource
+ *                               args = cam_hw_release_args
+ * @hw_start:              Function pointer for start hw devices
+ *                               args = cam_hw_start_args
+ * @hw_stop:               Function pointer for stop hw devices
+ *                               args = cam_hw_stop_args
+ * @hw_prepare_update:     Function pointer for prepare hw update for hw devices
+ *                               args = cam_hw_prepare_update_args
+ * @hw_config:             Function pointer for configure hw devices
+ *                               args = cam_hw_config_args
+ * @hw_read:               Function pointer for read hardware registers
+ * @hw_write:              Function pointer for write hardware registers
+ * @hw_cmd:                Function pointer for any customized commands for the
+ *                         hardware manager
+ *
+ */
+struct cam_hw_mgr_intf {
+	void *hw_mgr_priv;
+
+	int (*hw_get_caps)(void *hw_priv, void *hw_caps_args);
+	int (*hw_acquire)(void *hw_priv, void *hw_acquire_args);
+	int (*hw_release)(void *hw_priv, void *hw_release_args);
+	int (*hw_start)(void *hw_priv, void *hw_start_args);
+	int (*hw_stop)(void *hw_priv, void *hw_stop_args);
+	int (*hw_prepare_update)(void *hw_priv, void *hw_prepare_update_args);
+	int (*hw_config)(void *hw_priv, void *hw_config_args);
+	int (*hw_read)(void *hw_priv, void *read_args);
+	int (*hw_write)(void *hw_priv, void *write_args);
+	int (*hw_cmd)(void *hw_priv, void *hw_cmd_args);
+};
+
+#endif /* _CAM_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
new file mode 100644
index 0000000..ef60822
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -0,0 +1,413 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+
+#include "cam_node.h"
+
+static int __cam_node_handle_query_cap(struct cam_node *node,
+	struct cam_query_cap_cmd *query)
+{
+	int rc = -EFAULT;
+
+	if (!query)
+		return -EINVAL;
+
+	if (node->hw_mgr_intf.hw_get_caps) {
+		rc = node->hw_mgr_intf.hw_get_caps(
+			node->hw_mgr_intf.hw_mgr_priv, query);
+	}
+	return rc;
+}
+
+static int __cam_node_handle_acquire_dev(struct cam_node *node,
+	struct cam_acquire_dev_cmd *acquire)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!acquire)
+		return -EINVAL;
+
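+	/* Take an unused context from the node's free pool */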
+	mutex_lock(&node->list_mutex);
+	if (!list_empty(&node->free_ctx_list)) {
+		ctx = list_first_entry(&node->free_ctx_list,
+			struct cam_context, list);
+		list_del_init(&ctx->list);
+	}
+	mutex_unlock(&node->list_mutex);
+
+	if (!ctx) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = cam_context_handle_acquire_dev(ctx, acquire);
+	if (rc) {
+		pr_err("%s: Acquire device failed\n", __func__);
+		goto free_ctx;
+	}
+
+	return 0;
+free_ctx:
+	mutex_lock(&node->list_mutex);
+	list_add_tail(&ctx->list, &node->free_ctx_list);
+	mutex_unlock(&node->list_mutex);
+err:
+	return rc;
+}
+
+static int __cam_node_handle_start_dev(struct cam_node *node,
+	struct cam_start_stop_dev_cmd *start)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!start)
+		return -EINVAL;
+
+	if (start->dev_handle <= 0) {
+		pr_err("Invalid device handle for context\n");
+		return -EINVAL;
+	}
+
+	if (start->session_handle <= 0) {
+		pr_err("Invalid session handle for context\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, start->dev_handle);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_start_dev(ctx, start);
+}
+
+static int __cam_node_handle_stop_dev(struct cam_node *node,
+	struct cam_start_stop_dev_cmd *stop)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!stop)
+		return -EINVAL;
+
+	if (stop->dev_handle <= 0) {
+		pr_err("Invalid device handle for context\n");
+		return -EINVAL;
+	}
+
+	if (stop->session_handle <= 0) {
+		pr_err("Invalid session handle for context\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, stop->dev_handle);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_stop_dev(ctx, stop);
+}
+
+static int __cam_node_handle_config_dev(struct cam_node *node,
+	struct cam_config_dev_cmd *config)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!config)
+		return -EINVAL;
+
+	if (config->dev_handle <= 0) {
+		pr_err("Invalid device handle for context\n");
+		return -EINVAL;
+	}
+
+	if (config->session_handle <= 0) {
+		pr_err("Invalid session handle for context\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, config->dev_handle);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_config_dev(ctx, config);
+}
+
+static int __cam_node_handle_release_dev(struct cam_node *node,
+	struct cam_release_dev_cmd *release)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!release)
+		return -EINVAL;
+
+	if (release->dev_handle <= 0) {
+		pr_err("Invalid device handle for context\n");
+		return -EINVAL;
+	}
+
+	if (release->session_handle <= 0) {
+		pr_err("Invalid session handle for context\n");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, release->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_release_dev(ctx, release);
+	if (rc)
+		pr_err("%s: context release failed\n", __func__);
+
+	rc = cam_destroy_device_hdl(release->dev_handle);
+	if (rc)
+		pr_err("%s: destroy device handle is failed\n", __func__);
+
+	mutex_lock(&node->list_mutex);
+	list_add_tail(&ctx->list, &node->free_ctx_list);
+	mutex_unlock(&node->list_mutex);
+	return rc;
+}
+
+static int __cam_node_get_dev_info(struct cam_req_mgr_device_info *info)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!info)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, info->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_get_dev_info(ctx, info);
+}
+
+static int __cam_node_link_setup(struct cam_req_mgr_core_dev_link_setup *setup)
+{
+	int rc;
+	struct cam_context *ctx = NULL;
+
+	if (!setup)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, setup->dev_hdl);
+		return -EINVAL;
+	}
+
+	if (setup->link_enable)
+		rc = cam_context_handle_link(ctx, setup);
+	else
+		rc = cam_context_handle_unlink(ctx, setup);
+
+	return rc;
+}
+
+static int __cam_node_apply_req(struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
+	if (!ctx) {
+		pr_err("%s: Can not get context for handle %d\n",
+			__func__, apply->dev_hdl);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_apply_req(ctx, apply);
+}
+
+int cam_node_deinit(struct cam_node *node)
+{
+	if (node)
+		memset(node, 0, sizeof(*node));
+
+	pr_debug("%s: deinit complete!\n", __func__);
+	return 0;
+
+}
+
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name)
+{
+	int rc = 0;
+	int i;
+
+	if (!node || !hw_mgr_intf ||
+		sizeof(node->hw_mgr_intf) != sizeof(*hw_mgr_intf)) {
+		return -EINVAL;
+	}
+
+	memset(node, 0, sizeof(*node));
+
+	strlcpy(node->name, name, sizeof(node->name));
+
+	memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
+
+	node->crm_node_intf.apply_req = __cam_node_apply_req;
+	node->crm_node_intf.get_dev_info = __cam_node_get_dev_info;
+	node->crm_node_intf.link_setup = __cam_node_link_setup;
+
+	mutex_init(&node->list_mutex);
+	INIT_LIST_HEAD(&node->free_ctx_list);
+	node->ctx_list = ctx_list;
+	node->ctx_size = ctx_size;
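+	/* Contexts must be initialized before they are added to the free pool */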
+	for (i = 0; i < ctx_size; i++) {
+		if (!ctx_list[i].state_machine) {
+			pr_err("%s: camera context %d is not initialized!\n",
+				__func__, i);
+			rc = -EINVAL;
+			goto err;
+		}
+		INIT_LIST_HEAD(&ctx_list[i].list);
+		list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
+	}
+
+	node->state = CAM_NODE_STATE_INIT;
+err:
+	pr_debug("%s: Exit. (rc = %d)\n", __func__, rc);
+	return rc;
+}
+
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
+{
+	int rc = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	pr_debug("%s: handle cmd %d\n", __func__, cmd->op_code);
+
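+	/* Each opcode copies its payload from user space before dispatching */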
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_query_cap_cmd query;
+
+		if (copy_from_user(&query, (void __user *)cmd->handle,
+			sizeof(query))) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = __cam_node_handle_query_cap(node, &query);
+		if (rc) {
+			pr_err("%s: querycap is failed(rc = %d)\n",
+				__func__,  rc);
+			break;
+		}
+		if (copy_to_user((void __user *)cmd->handle, &query,
+			sizeof(query)))
+			rc = -EFAULT;
+		break;
+	}
+	case CAM_ACQUIRE_DEV: {
+		struct cam_acquire_dev_cmd acquire;
+
+		if (copy_from_user(&acquire, (void __user *)cmd->handle,
+			sizeof(acquire))) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = __cam_node_handle_acquire_dev(node, &acquire);
+		if (rc) {
+			pr_err("%s: acquire device failed(rc = %d)\n",
+				__func__, rc);
+			break;
+		}
+		if (copy_to_user((void __user *)cmd->handle, &acquire,
+			sizeof(acquire)))
+			rc = -EFAULT;
+		break;
+	}
+	case CAM_START_DEV: {
+		struct cam_start_stop_dev_cmd start;
+
+		if (copy_from_user(&start, (void __user *)cmd->handle,
+			sizeof(start)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_start_dev(node, &start);
+			if (rc)
+				pr_err("%s: start device failed(rc = %d)\n",
+					__func__, rc);
+		}
+		break;
+	}
+	case CAM_STOP_DEV: {
+		struct cam_start_stop_dev_cmd stop;
+
+		if (copy_from_user(&stop, (void __user *)cmd->handle,
+			sizeof(stop)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_stop_dev(node, &stop);
+			if (rc)
+				pr_err("%s: stop device failed(rc = %d)\n",
+					__func__, rc);
+		}
+		break;
+	}
+	case CAM_CONFIG_DEV: {
+		struct cam_config_dev_cmd config;
+
+		if (copy_from_user(&config, (void __user *)cmd->handle,
+			sizeof(config)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_config_dev(node, &config);
+			if (rc)
+				pr_err("%s: config device failed(rc = %d)\n",
+					__func__, rc);
+		}
+		break;
+	}
+	case CAM_RELEASE_DEV: {
+		struct cam_release_dev_cmd release;
+
+		if (copy_from_user(&release, (void __user *)cmd->handle,
+			sizeof(release)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_release_dev(node, &release);
+			if (rc)
+				pr_err("%s: release device failed(rc = %d)\n",
+					__func__, rc);
+		}
+		break;
+	}
+	default:
+		pr_err("Unknown op code %d\n", cmd->op_code);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.h b/drivers/media/platform/msm/camera/cam_core/cam_node.h
new file mode 100644
index 0000000..6e4a641
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_NODE_H_
+#define _CAM_NODE_H_
+
+#include "cam_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+#define CAM_NODE_NAME_LENGTH_MAX        256
+
+#define CAM_NODE_STATE_UNINIT           0
+#define CAM_NODE_STATE_INIT             1
+
+/**
+ * struct cam_node - Singleton Node for camera HW devices
+ *
+ * @name:                  Name for struct cam_node
+ * @state:                 Node state:
+ *                            0 = uninitialized, 1 = initialized
+ * @list_mutex:            Mutex for the context pool
+ * @free_ctx_list:         Free context pool list
+ * @ctx_list:              Context list
+ * @ctx_size:              Context list size
+ * @hw_mgr_intf:           Interface for cam_node to HW
+ * @crm_node_intf:         Interface for the CRM to cam_node
+ *
+ */
+struct cam_node {
+	char                         name[CAM_NODE_NAME_LENGTH_MAX];
+	uint32_t                     state;
+
+	/* context pool */
+	struct mutex                 list_mutex;
+	struct list_head             free_ctx_list;
+	struct cam_context          *ctx_list;
+	uint32_t                     ctx_size;
+
+	/* interfaces */
+	struct cam_hw_mgr_intf       hw_mgr_intf;
+	struct cam_req_mgr_kmd_ops   crm_node_intf;
+};
+
+/**
+ * cam_node_handle_ioctl()
+ *
+ * @brief:       Handle ioctl commands
+ *
+ * @node:                  Node handle
+ * @cmd:                   IOCTL command
+ *
+ */
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd);
+
+/**
+ * cam_node_deinit()
+ *
+ * @brief:       Deinitialization function for the Node interface
+ *
+ * @node:                  Node handle
+ *
+ */
+int cam_node_deinit(struct cam_node *node);
+
+/**
+ * cam_node_init()
+ *
+ * @brief:       Initialization function for the Node interface.
+ *
+ * @node:                  Cam_node pointer
+ * @hw_mgr_intf:           HW manager interface blob
+ * @ctx_list:              List of cam_contexts to be added
+ * @ctx_size:              Number of contexts in ctx_list
+ * @name:                  Name for the node
+ *
+ */
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name);
+
+#endif /* _CAM_NODE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
new file mode 100644
index 0000000..03b18cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -0,0 +1,143 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+
+/**
+ * cam_subdev_subscribe_event()
+ *
+ * @brief: function to subscribe to v4l2 events
+ *
+ * @sd:                    Pointer to struct v4l2_subdev.
+ * @fh:                    Pointer to struct v4l2_fh.
+ * @sub:                   Pointer to struct v4l2_event_subscription.
+ */
+static int cam_subdev_subscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL);
+}
+
+/**
+ * cam_subdev_unsubscribe_event()
+ *
+ * @brief: function to unsubscribe from v4l2 events
+ *
+ * @sd:                    Pointer to struct v4l2_subdev.
+ * @fh:                    Pointer to struct v4l2_fh.
+ * @sub:                   Pointer to struct v4l2_event_subscription.
+ */
+static int cam_subdev_unsubscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static long cam_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd,
+	void *arg)
+{
+	long rc;
+	struct cam_node *node =
+		(struct cam_node *) v4l2_get_subdevdata(sd);
+
+	if (!node || node->state == CAM_NODE_STATE_UNINIT) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_node_handle_ioctl(node,
+			(struct cam_control *) arg);
+		break;
+	default:
+		pr_err("Invalid command %d for %s!\n", cmd,
+			node->name);
+		rc = -EINVAL;
+	}
+end:
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	return cam_subdev_ioctl(sd, cmd, compat_ptr(arg));
+}
+#endif
+
+const struct v4l2_subdev_core_ops cam_subdev_core_ops = {
+	.ioctl = cam_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_subdev_compat_ioctl,
+#endif
+	.subscribe_event = cam_subdev_subscribe_event,
+	.unsubscribe_event = cam_subdev_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops cam_subdev_ops = {
+	.core = &cam_subdev_core_ops,
+};
+
+int cam_subdev_remove(struct cam_subdev *sd)
+{
+	if (!sd)
+		return -EINVAL;
+
+	cam_unregister_subdev(sd);
+	cam_node_deinit((struct cam_node *)sd->token);
+	kfree(sd->token);
+
+	return 0;
+}
+
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type)
+{
+	int rc;
+	struct cam_node *node = NULL;
+
+	if (!sd || !pdev || !name) {
+		rc = -EINVAL;
+		goto err;
+	}
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	/* Setup camera v4l2 subdevice */
+	sd->pdev = pdev;
+	sd->name = name;
+	sd->ops = &cam_subdev_ops;
+	sd->token = node;
+	sd->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	sd->ent_function = dev_type;
+	rc = cam_register_subdev(sd);
+	if (rc) {
+		pr_err("%s: cam_register_subdev() failed for dev: %s!\n",
+			__func__, sd->name);
+		goto err;
+	}
+	platform_set_drvdata(sd->pdev, sd);
+	return rc;
+err:
+	kfree(node);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_dev_mgr_util.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_dev_mgr_util.h
deleted file mode 100644
index 69970b5..0000000
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_dev_mgr_util.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _CAM_DEV_MGR_UTIL_H_
-#define _CAM_DEV_MGR_UTIL_H_
-
-#define CAM_SUBDEVICE_EVENT_MAX 30
-
-#include <linux/types.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-event.h>
-#include <media/v4l2-ioctl.h>
-
-/**
- * struct cam_subdev - describes a camera sub-device
- *
- * @sd: struct v4l2_subdev
- * @ops: struct v4l2_subdev_ops
- * @internal_ops: struct v4l2_subdev_internal_ops
- * @name: Name of the sub-device. Please notice that the name must be unique.
- * @sd_flags: subdev flags. Can be:
- *   %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if this subdev needs a
- *   device node;
- *   %V4L2_SUBDEV_FL_HAS_EVENTS -  Set this flag if this subdev generates
- *   events.
- * @token: pointer to cookie of the client driver
- * @ent_function: media entity function type. Can be:
- *   %CAM_IFE_DEVICE_TYPE - identifies as IFE device;
- *   %CAM_ICP_DEVICE_TYPE - identifies as ICP device.
- * Each instance of a subdev driver should create this struct, either
- * stand-alone or embedded in a larger struct.
- *
- * This structure should be initialized/registered by cam_register_subdev
- */
-struct cam_subdev {
-	struct v4l2_subdev sd;
-	const struct v4l2_subdev_ops *ops;
-	const struct v4l2_subdev_internal_ops *internal_ops;
-	char *name;
-	u32 sd_flags;
-	void *token;
-	u32 ent_function;
-};
-
-/**
- * cam_register_subdev()
- *
- * @brief:  Registration function for camera subdevice
- *
- * @sd: pointer to struct cam_subdev.
- */
-int cam_register_subdev(struct cam_subdev *sd);
-
-/**
- * cam_unregister_subdev()
- *
- * @brief:  Unregistration function for camera subdevice
- *
- * @sd: pointer to struct cam_subdev.
- */
-int cam_unregister_subdev(struct cam_subdev *sd);
-
-/**
- * cam_send_event()
- *
- * @brief: Inline function to sent event to user space
- *
- * @csd: pointer to struct cam_subdev.
- * @ev: pointer to struct v4l2_event.
- */
-static inline int cam_send_event(struct cam_subdev *csd,
-	const struct v4l2_event *ev)
-{
-	if (!csd || !ev)
-		return -EINVAL;
-
-	v4l2_event_queue(csd->sd.devnode, ev);
-
-	return 0;
-}
-
-/**
- * cam_get_subdev_data()
- *
- * @brief: Inline function to retrieve the private data
- *
- * @csd: pointer to struct cam_subdev.
- */
-static inline void *cam_get_subdev_data(struct cam_subdev *csd)
-{
-	if (!csd)
-		return ERR_PTR(-EINVAL);
-
-	return v4l2_get_subdevdata(&csd->sd);
-}
-
-/**
- * cam_sd_subscribe_event()
- *
- * @brief: Inline function to subscribe to v4l2 events
- *
- * @sd: pointer to struct v4l2_subdev.
- * @fh: pointer to struct v4l2_fh.
- * @sub: pointer to struct v4l2_event_subscription.
- */
-static inline int cam_sd_subscribe_event(struct v4l2_subdev *sd,
-	struct v4l2_fh *fh,
-	struct v4l2_event_subscription *sub)
-{
-	return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL);
-}
-
-/**
- * cam_sd_unsubscribe_event()
- *
- * @brief: Inline function to unsubscribe from v4l2 events
- *
- * @sd: pointer to struct v4l2_subdev.
- * @fh: pointer to struct v4l2_fh.
- * @sub: pointer to struct v4l2_event_subscription.
- */
-static inline int cam_sd_unsubscribe_event(struct v4l2_subdev *sd,
-	struct v4l2_fh *fh,
-	struct v4l2_event_subscription *sub)
-{
-	return v4l2_event_unsubscribe(fh, sub);
-}
-#endif /* _CAM_DEV_MGR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 2dba2c8..f3af1bd 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -23,7 +23,7 @@
 #include "cam_req_mgr_dev.h"
 #include "cam_req_mgr_util.h"
 #include "cam_req_mgr_core.h"
-#include "cam_dev_mgr_util.h"
+#include "cam_subdev.h"
 
 #define CAM_REQ_MGR_EVENT_MAX 30
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
new file mode 100644
index 0000000..78f2223
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SUBDEV_H_
+#define _CAM_SUBDEV_H_
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#define CAM_SUBDEVICE_EVENT_MAX 30
+
+/**
+ * struct cam_subdev - describes a camera sub-device
+ *
+ * @pdev:                  Pointer to the platform device
+ * @sd:                    V4L2 subdevice
+ * @ops:                   V4L2 subdevice operations
+ * @internal_ops:          V4L2 subdevice internal operations
+ * @name:                  Name of the sub-device. Note that the name must be
+ *                             unique.
+ * @sd_flags:              Subdev flags. Can be:
+ *                             %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if
+ *                                 this subdev needs a device node.
+ *                             %V4L2_SUBDEV_FL_HAS_EVENTS -  Set this flag if
+ *                                 this subdev generates events.
+ * @token:                 Pointer to cookie of the client driver
+ * @ent_function:          Media entity function type. Can be:
+ *                             %CAM_IFE_DEVICE_TYPE - identifies as IFE device.
+ *                             %CAM_ICP_DEVICE_TYPE - identifies as ICP device.
+ *
+ * Each instance of a subdev driver should create this struct, either
+ * stand-alone or embedded in a larger struct. This structure should be
+ * initialized/registered by cam_register_subdev
+ *
+ */
+struct cam_subdev {
+	struct platform_device                *pdev;
+	struct v4l2_subdev                     sd;
+	const struct v4l2_subdev_ops          *ops;
+	const struct v4l2_subdev_internal_ops *internal_ops;
+	char                                  *name;
+	u32                                    sd_flags;
+	void                                  *token;
+	u32                                    ent_function;
+};
+
+/**
+ * cam_subdev_probe()
+ *
+ * @brief:      Camera Subdevice node probe function for v4l2 setup
+ *
+ * @sd:                    Camera subdevice object
+ * @pdev:                  Pointer to the platform device
+ * @name:                  Name of the subdevice node
+ * @dev_type:              Subdevice node type
+ *
+ */
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type);
+
+/**
+ * cam_subdev_remove()
+ *
+ * @brief:      Called when subdevice node is unloaded
+ *
+ * @sd:                    Camera subdevice node object
+ *
+ */
+int cam_subdev_remove(struct cam_subdev *sd);
+
+/**
+ * cam_register_subdev()
+ *
+ * @brief:   This is the common utility function to be called by each camera
+ *           subdevice node when it tries to register itself to the camera
+ *           request manager
+ *
+ * @sd:                    Pointer to struct cam_subdev.
+ */
+int cam_register_subdev(struct cam_subdev *sd);
+
+/**
+ * cam_unregister_subdev()
+ *
+ * @brief:    This is the common utility function to be called by each camera
+ *            subdevice node when it tries to unregister itself from the
+ *            camera request manager
+ *
+ * @sd:                    Pointer to struct cam_subdev.
+ */
+int cam_unregister_subdev(struct cam_subdev *sd);
+
+#endif /* _CAM_SUBDEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index d396d4f..683386c 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -59,7 +59,7 @@
 	int rc = 0;
 	long clk_rate_round;
 
-	if (!clk || !clk_name || !clk_rate)
+	if (!clk || !clk_name)
 		return -EINVAL;
 
 	CDBG("enable %s, clk %pK rate %d\n",
@@ -231,8 +231,8 @@
 		return rc;
 	}
 
-	rc = of_property_read_string_index(of_node, "src-clock-name",
-				i, &src_clk_str);
+	rc = of_property_read_string_index(of_node, "src-clock-name", 0,
+		&src_clk_str);
 	if (rc) {
 		CDBG("No src_clk_str found\n");
 		soc_info->src_clk_idx = -1;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index c7abd9d..a0b53bb 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -98,6 +98,7 @@
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
  * @SDE_CAPS_MIN_BUS_VOTE: minimum bus vote prior to power enable
  * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
+ * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
  */
 enum sde_caps_settings {
 	SDE_CAPS_R1_WB,
@@ -106,6 +107,7 @@
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
 	SDE_CAPS_MIN_BUS_VOTE,
 	SDE_CAPS_SBUF_1,
+	SDE_CAPS_UBWC_2,
 	SDE_CAPS_MAX,
 };
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 643e8a0..27156fc 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -112,7 +112,10 @@
 		return -EINVAL;
 	}
 
-	if (bus->bus_hdl < 1) {
+	if (!bus->bus_hdl) {
+		SDEROT_DBG("bus scaling not enabled\n");
+		return 0;
+	} else if (bus->bus_hdl < 0) {
 		SDEROT_ERR("invalid bus handle %d\n", bus->bus_hdl);
 		return -EINVAL;
 	}
@@ -2501,8 +2504,7 @@
 	mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
 	if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
 		ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
-		if (!ret) {
-			ret = -EINVAL;
+		if (ret) {
 			SDEROT_ERR("msm_bus_cl_get_pdata failed. ret=%d\n",
 					ret);
 			mgr->data_bus.bus_scale_pdata = NULL;
@@ -2638,8 +2640,8 @@
 static int sde_rotator_bus_scale_register(struct sde_rot_mgr *mgr)
 {
 	if (!mgr->data_bus.bus_scale_pdata) {
-		SDEROT_ERR("Scale table is NULL\n");
-		return -EINVAL;
+		SDEROT_DBG("Bus scaling is not enabled\n");
+		return 0;
 	}
 
 	mgr->data_bus.bus_hdl =
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index c78c513..573e0a8 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -284,6 +284,27 @@
 		},
 	},
 	{
+		.mdp_format = {
+			FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC),
+			.description = "SDE/Y_CBCR_H2V2_P010_UBWC",
+			.flag = 0,
+			.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
+			.chroma_sample = SDE_MDP_CHROMA_420,
+			.unpack_count = 2,
+			.bpp = 2,
+			.frame_format = SDE_MDP_FMT_TILE_A5X,
+			.pixel_mode = SDE_MDP_PIXEL_10BIT,
+			.element = { C1_B_Cb, C2_R_Cr },
+			.unpack_tight = 0,
+			.unpack_align_msb = 1,
+			.is_ubwc = SDE_MDP_COMPRESS_UBWC
+		},
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 32,
+		},
+	},
+	{
 		.mdp_format =
 			FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_TILE,
 			"SDE/RGBA_1010102_TILE",
@@ -517,6 +538,27 @@
 			.tile_width = 16,
 		},
 	},
+	{
+		.mdp_format = {
+			FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE),
+			.description = "SDE/Y_CBCR_H2V2_P010_TILE",
+			.flag = SDE_MDP_FORMAT_FLAG_PRIVATE,
+			.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
+			.chroma_sample = SDE_MDP_CHROMA_420,
+			.unpack_count = 2,
+			.bpp = 2,
+			.frame_format = SDE_MDP_FMT_TILE_A5X,
+			.pixel_mode = SDE_MDP_PIXEL_10BIT,
+			.element = { C1_B_Cb, C2_R_Cr },
+			.unpack_tight = 0,
+			.unpack_align_msb = 1,
+			.is_ubwc = SDE_MDP_COMPRESS_NONE,
+		},
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 32,
+		},
+	},
 };
 
 static struct sde_mdp_format_params sde_mdp_format_map[] = {
@@ -853,6 +895,11 @@
 	case SDE_PIX_FMT_BGRX_1010102_TILE:
 		*dst_pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
 		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE;
+		break;
 	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
 	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC:
 		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 4278b6d..2c9c75e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -48,10 +48,12 @@
 #define XIN_WRITEBACK		1
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT		(42 * 32)
+#define KOFF_TIMEOUT		(84)
 
 /* default stream buffer headroom in lines */
 #define DEFAULT_SBUF_HEADROOM	20
+#define DEFAULT_UBWC_MALSIZE	1
+#define DEFAULT_UBWC_SWIZZLE	1
 
 /* Macro for constructing the REGDMA command */
 #define SDE_REGDMA_WRITE(p, off, data) \
@@ -278,6 +280,8 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
 	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
 	SDE_PIX_FMT_XRGB_8888_TILE,
@@ -356,6 +360,8 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
 	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
 	SDE_PIX_FMT_XRGB_8888_TILE,
@@ -877,6 +883,12 @@
 			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
 			((rot->highest_bank & 0x3) << 18));
 
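+	/* Program source UBWC static control when UBWC 2.0 is supported */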
+	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
+				((ctx->rot->ubwc_malsize & 0x3) << 8) |
+				((ctx->rot->highest_bank & 0x3) << 4) |
+				((ctx->rot->ubwc_swizzle & 0x1) << 0));
+
 	/* setup source buffer plane security status */
 	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
 			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
@@ -1009,6 +1021,12 @@
 	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
 			(ctx->rot->highest_bank & 0x3) << 8);
 
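+	/* Program writeback UBWC static control when UBWC 2.0 is supported */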
+	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
+				((ctx->rot->ubwc_malsize & 0x3) << 8) |
+				((ctx->rot->highest_bank & 0x3) << 4) |
+				((ctx->rot->ubwc_swizzle & 0x1) << 0));
+
 	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
 		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
 				ctx->sys_cache_mode);
@@ -2212,6 +2230,7 @@
 		SDEROT_DBG("Supporting sys cache inline rotation\n");
 		set_bit(SDE_CAPS_MIN_BUS_VOTE,  mdata->sde_caps_map);
 		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
 		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
 		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
 		rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
@@ -2739,6 +2758,26 @@
 	}
 
 	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,sde-ubwc-malsize", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
+	} else {
+		SDEROT_DBG("set ubwc malsize to %d\n", data);
+		hw_data->ubwc_malsize = data;
+	}
+
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,sde-ubwc_swizzle", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
+	} else {
+		SDEROT_DBG("set ubwc swizzle to %d\n", data);
+		hw_data->ubwc_swizzle = data;
+	}
+
+	ret = of_property_read_u32(dev->dev.of_node,
 			"qcom,mdss-sbuf-headroom", &data);
 	if (ret) {
 		ret = 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index f86f54b..dc97bdf 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -70,6 +70,7 @@
 #define ROT_SSPP_SRC_UNPACK_PATTERN             (SDE_ROT_SSPP_OFFSET+0x34)
 #define ROT_SSPP_SRC_OP_MODE                    (SDE_ROT_SSPP_OFFSET+0x38)
 #define ROT_SSPP_SRC_CONSTANT_COLOR             (SDE_ROT_SSPP_OFFSET+0x3C)
+#define ROT_SSPP_UBWC_STATIC_CTRL               (SDE_ROT_SSPP_OFFSET+0x44)
 #define ROT_SSPP_FETCH_CONFIG                   (SDE_ROT_SSPP_OFFSET+0x48)
 #define ROT_SSPP_VC1_RANGE                      (SDE_ROT_SSPP_OFFSET+0x4C)
 #define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_0   (SDE_ROT_SSPP_OFFSET+0x50)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index c011d7a..d1607d9 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -248,6 +248,9 @@
  * struct sde_hw_rotator : Rotator description
  * @hw:           mdp register mapped offset
  * @ops:          pointer to operations possible for the rotator HW
+ * @highest_bank: highest bank size of memory
+ * @ubwc_malsize: ubwc minimum allowable length
+ * @ubwc_swizzle: ubwc swizzle enable
  * @sbuf_headroom: stream buffer headroom in lines
  * @solid_fill: true if solid fill is requested
  * @constant_color: solid fill constant color
@@ -296,6 +299,8 @@
 	void *swts_buffer;
 
 	u32    highest_bank;
+	u32    ubwc_malsize;
+	u32    ubwc_swizzle;
 	u32    sbuf_headroom;
 	u32    solid_fill;
 	u32    constant_color;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 4cf9dfc..9ef4282 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -210,6 +210,32 @@
 		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
 		ps->plane_size[3] = ALIGN(ps->ystride[3] *
 			ALIGN(DIV_ROUND_UP(height / 2, 8), 16), 4096);
+	} else if (sde_mdp_is_p010_format(fmt)) {
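+		/* P010: 4:2:0, 10-bit samples stored in 16-bit (2 byte) words */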
+		ps->num_planes = 2;
+		/* Y bitstream stride and plane size */
+		ps->ystride[0] = ALIGN(width * 2, 256);
+		ps->plane_size[0] = ALIGN(ps->ystride[0] * ALIGN(height, 16),
+					4096);
+
+		/* CbCr bitstream stride and plane size */
+		ps->ystride[1] = ALIGN(width * 2, 256);
+		ps->plane_size[1] = ALIGN(ps->ystride[1] *
+			ALIGN(height / 2, 16), 4096);
+
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
+		ps->num_planes += 2;
+
+		/* Y meta data stride and plane size */
+		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, 32), 64);
+		ps->plane_size[2] = ALIGN(ps->ystride[2] *
+			ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
+
+		/* CbCr meta data stride and plane size */
+		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
+		ps->plane_size[3] = ALIGN(ps->ystride[3] *
+			ALIGN(DIV_ROUND_UP(height / 2, 4), 16), 4096);
 	} else if (sde_mdp_is_tp10_format(fmt)) {
 		u32 yWidth   = sde_mdp_general_align(width, 192);
 		u32 yHeight  = ALIGN(height, 16);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 49fe5fb..87a4ac8 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -33,7 +33,6 @@
 	[ilog2(HAL_H264_PROFILE_CONSTRAINED_HIGH)] =
 		HFI_H264_PROFILE_CONSTRAINED_HIGH,
 	[ilog2(HAL_VPX_PROFILE_VERSION_1)] = HFI_VPX_PROFILE_VERSION_1,
-	[ilog2(HAL_MVC_PROFILE_STEREO_HIGH)] = HFI_H264_PROFILE_STEREO_HIGH,
 };
 
 static int entropy_mode[] = {
@@ -68,13 +67,10 @@
 	[ilog2(HAL_COLOR_FORMAT_BGR565)] = HFI_COLOR_FORMAT_BGR565,
 	[ilog2(HAL_COLOR_FORMAT_RGB888)] = HFI_COLOR_FORMAT_RGB888,
 	[ilog2(HAL_COLOR_FORMAT_BGR888)] = HFI_COLOR_FORMAT_BGR888,
-	[ilog2(HAL_COLOR_FORMAT_RGBA8888)] = HFI_COLOR_FORMAT_RGBA8888,
 	/* UBWC Color formats*/
 	[ilog2(HAL_COLOR_FORMAT_NV12_UBWC)] =  HFI_COLOR_FORMAT_NV12_UBWC,
 	[ilog2(HAL_COLOR_FORMAT_NV12_TP10_UBWC)] =
 			HFI_COLOR_FORMAT_YUV420_TP10_UBWC,
-	[ilog2(HAL_COLOR_FORMAT_RGBA8888_UBWC)] =
-			HFI_COLOR_FORMAT_RGBA8888_UBWC,
 };
 
 static int nal_type[] = {
@@ -126,26 +122,6 @@
 	}
 }
 
-u32 get_hfi_layout(enum hal_buffer_layout_type hal_buf_layout)
-{
-	u32 hfi_layout;
-
-	switch (hal_buf_layout) {
-	case HAL_BUFFER_LAYOUT_TOP_BOTTOM:
-		hfi_layout = HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM;
-		break;
-	case HAL_BUFFER_LAYOUT_SEQ:
-		hfi_layout = HFI_MVC_BUFFER_LAYOUT_SEQ;
-		break;
-	default:
-		dprintk(VIDC_ERR, "Invalid buffer layout: %#x\n",
-			hal_buf_layout);
-		hfi_layout = HFI_MVC_BUFFER_LAYOUT_SEQ;
-		break;
-	}
-	return hfi_layout;
-}
-
 enum hal_domain vidc_get_hal_domain(u32 hfi_domain)
 {
 	enum hal_domain hal_domain = 0;
@@ -192,9 +168,6 @@
 	case HFI_VIDEO_CODEC_VP9:
 		hal_codec = HAL_VIDEO_CODEC_VP9;
 		break;
-	case HFI_VIDEO_CODEC_HEVC_HYBRID:
-		hal_codec = HAL_VIDEO_CODEC_HEVC_HYBRID;
-		break;
 	default:
 		dprintk(VIDC_INFO, "%s: invalid codec 0x%x\n",
 			__func__, hfi_codec);
@@ -233,7 +206,6 @@
 	u32 hfi_codec = 0;
 
 	switch (hal_codec) {
-	case HAL_VIDEO_CODEC_MVC:
 	case HAL_VIDEO_CODEC_H264:
 		hfi_codec = HFI_VIDEO_CODEC_H264;
 		break;
@@ -252,9 +224,6 @@
 	case HAL_VIDEO_CODEC_VP9:
 		hfi_codec = HFI_VIDEO_CODEC_VP9;
 		break;
-	case HAL_VIDEO_CODEC_HEVC_HYBRID:
-		hfi_codec = HFI_VIDEO_CODEC_HEVC_HYBRID;
-		break;
 	default:
 		dprintk(VIDC_INFO, "%s: invalid codec 0x%x\n",
 			__func__, hal_codec);
@@ -555,12 +524,6 @@
 	case HAL_EXTRADATA_INTERLACE_VIDEO:
 		ret = HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA;
 		break;
-	case HAL_EXTRADATA_VC1_FRAMEDISP:
-		ret = HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA;
-		break;
-	case HAL_EXTRADATA_VC1_SEQDISP:
-		ret = HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA;
-		break;
 	case HAL_EXTRADATA_TIMESTAMP:
 		ret = HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA;
 		break;
@@ -673,9 +636,6 @@
 	case HAL_LTR_MODE_MANUAL:
 		ltrmode = HFI_LTR_MODE_MANUAL;
 		break;
-	case HAL_LTR_MODE_PERIODIC:
-		ltrmode = HFI_LTR_MODE_PERIODIC;
-		break;
 	default:
 		dprintk(VIDC_ERR, "Invalid ltr mode: %#x\n",
 			ltr_mode_type);
@@ -939,31 +899,10 @@
 		struct hfi_cmd_session_get_property_packet *pkt,
 		struct hal_session *session, enum hal_property ptype)
 {
-	int rc = 0;
-
-	if (!pkt || !session) {
-		dprintk(VIDC_ERR, "%s Invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	pkt->size = sizeof(struct hfi_cmd_session_get_property_packet);
-	pkt->packet_type = HFI_CMD_SESSION_GET_PROPERTY;
-	pkt->session_id = hash32_ptr(session);
-	pkt->num_properties = 1;
-	switch (ptype) {
-	case HAL_CONFIG_VDEC_ENTROPY:
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
-		break;
-	case HAL_PARAM_PROFILE_LEVEL_CURRENT:
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
-		break;
-	default:
-		dprintk(VIDC_ERR, "%s cmd:%#x not supported\n", __func__,
+	/* Currently, get property is not supported */
+	dprintk(VIDC_ERR, "%s cmd:%#x not supported\n", __func__,
 			ptype);
-		rc = -EINVAL;
-		break;
-	}
-	return rc;
+	return -EINVAL;
 }
 
 int create_pkt_cmd_session_set_property(
@@ -1028,8 +967,6 @@
 		break;
 	case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO:
 		break;
-	case HAL_PARAM_EXTRA_DATA_HEADER_CONFIG:
-		break;
 	case HAL_PARAM_FRAME_SIZE:
 	{
 		struct hfi_frame_size *hfi;
@@ -1142,14 +1079,6 @@
 		pkt->size += sizeof(u32) * 2;
 		break;
 	}
-	case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) * 2;
-		break;
-	}
 	case HAL_PARAM_VDEC_MULTI_STREAM:
 	{
 		struct hfi_multi_stream *hfi;
@@ -1199,10 +1128,6 @@
 			HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
 		pkt->size += sizeof(u32);
 		break;
-	case HAL_PARAM_VENC_MPEG4_SHORT_HEADER:
-		break;
-	case HAL_PARAM_VENC_MPEG4_AC_PREDICTION:
-		break;
 	case HAL_CONFIG_VENC_TARGET_BITRATE:
 	{
 		struct hfi_bitrate *hfi;
@@ -1225,6 +1150,10 @@
 			HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
 		hfi = (struct hfi_profile_level *)
 			&pkt->rg_property_data[1];
+
+		/* There is an assumption here that the HAL level is the
+		 * same as the HFI level
+		 */
 		hfi->level = prop->level;
 		hfi->profile = hal_to_hfi_type(HAL_PARAM_PROFILE_LEVEL_CURRENT,
 				prop->profile);
@@ -1235,13 +1164,6 @@
 					prop->profile);
 		}
 
-		if (!hfi->level) {
-			hfi->level = 1;
-			dprintk(VIDC_WARN,
-					"Level %d not supported, falling back to high\n",
-					prop->level);
-		}
-
 		pkt->size += sizeof(u32) + sizeof(struct hfi_profile_level);
 		break;
 	}
@@ -1593,14 +1515,6 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
 		break;
 	}
-	case HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
-		break;
-	}
 	case HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY:
 	{
 		create_pkt_enable(pkt->rg_property_data,
@@ -1609,21 +1523,6 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
 		break;
 	}
-	case HAL_PARAM_MVC_BUFFER_LAYOUT:
-	{
-		struct hfi_mvc_buffer_layout_descp_type *hfi;
-		struct hal_mvc_buffer_layout *layout_info = pdata;
-
-		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT;
-		hfi = (struct hfi_mvc_buffer_layout_descp_type *)
-			&pkt->rg_property_data[1];
-		hfi->layout_type = get_hfi_layout(layout_info->layout_type);
-		hfi->bright_view_first = layout_info->bright_view_first;
-		hfi->ngap = layout_info->ngap;
-		pkt->size += sizeof(u32) +
-			sizeof(struct hfi_mvc_buffer_layout_descp_type);
-		break;
-	}
 	case HAL_PARAM_VENC_LTRMODE:
 	{
 		struct hfi_ltr_mode *hfi;
@@ -1734,14 +1633,6 @@
 		pkt->size += sizeof(u32) * 2;
 		break;
 	}
-	case HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS:
-	{
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER;
-		pkt->rg_property_data[1] = *(u32 *)pdata;
-		pkt->size += sizeof(u32) * 2;
-		break;
-	}
 	case HAL_PARAM_VENC_HIER_P_HYBRID_MODE:
 	{
 		pkt->rg_property_data[0] =
@@ -1940,7 +1831,6 @@
 	case HAL_PARAM_VDEC_MB_QUANTIZATION:
 	case HAL_PARAM_VDEC_NUM_CONCEALED_MB:
 	case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING:
-	case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING:
 	case HAL_CONFIG_BUFFER_COUNT_ACTUAL:
 	case HAL_CONFIG_VDEC_MULTI_STREAM:
 	case HAL_PARAM_VENC_MULTI_SLICE_INFO:
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index abc6cc8..7c99e90 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -26,13 +26,6 @@
 #define MAX_OPERATING_FRAME_RATE (300 << 16)
 #define OPERATING_FRAME_RATE_STEP (1 << 16)
 
-static const char *const mpeg_video_vidc_divx_format[] = {
-	"DIVX Format 3",
-	"DIVX Format 4",
-	"DIVX Format 5",
-	"DIVX Format 6",
-	NULL
-};
 static const char *const mpeg_video_stream_format[] = {
 	"NAL Format Start Codes",
 	"NAL Format One NAL Per Buffer",
@@ -57,29 +50,6 @@
 	"Turbo"
 };
 
-static const char *const h263_level[] = {
-	"1.0",
-	"2.0",
-	"3.0",
-	"4.0",
-	"4.5",
-	"5.0",
-	"6.0",
-	"7.0",
-};
-
-static const char *const h263_profile[] = {
-	"Baseline",
-	"H320 Coding",
-	"Backward Compatible",
-	"ISWV2",
-	"ISWV3",
-	"High Compression",
-	"Internet",
-	"Interlace",
-	"High Latency",
-};
-
 static const char *const vp8_profile_level[] = {
 	"Unused",
 	"0.0",
@@ -108,11 +78,6 @@
 	"CABAC Entropy Mode",
 };
 
-static const char *const mpeg_vidc_video_h264_mvc_layout[] = {
-	"Frame packing arrangement sequential",
-	"Frame packing arrangement top-bottom",
-};
-
 static const char *const mpeg_vidc_video_dpb_color_format[] = {
 	"DPB Color Format None",
 	"DPB Color Format UBWC",
@@ -462,37 +427,6 @@
 	return frame_size;
 }
 
-static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
-					struct v4l2_ctrl *ctrl)
-{
-	int rc = 0;
-
-	switch (ctrl->id) {
-	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
-		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
-			ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) {
-			dprintk(VIDC_ERR,
-					"Profile %#x not supported for MVC\n",
-					ctrl->val);
-			rc = -ENOTSUPP;
-			break;
-		}
-		break;
-	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
-		if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
-			ctrl->val >= V4L2_MPEG_VIDEO_H264_LEVEL_5_2) {
-			dprintk(VIDC_ERR, "Level %#x not supported for MVC\n",
-					ctrl->val);
-			rc = -ENOTSUPP;
-			break;
-		}
-		break;
-	default:
-		break;
-	}
-	return rc;
-}
-
 struct msm_vidc_format vdec_formats[] = {
 	{
 		.name = "YCbCr Semiplanar 4:2:0",
@@ -516,14 +450,6 @@
 		.type = CAPTURE_PORT,
 	},
 	{
-		.name = "Mpeg4",
-		.description = "Mpeg4 compressed format",
-		.fourcc = V4L2_PIX_FMT_MPEG4,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
 		.name = "Mpeg2",
 		.description = "Mpeg2 compressed format",
 		.fourcc = V4L2_PIX_FMT_MPEG2,
@@ -532,30 +458,6 @@
 		.defer_outputs = false,
 	},
 	{
-		.name = "H263",
-		.description = "H263 compressed format",
-		.fourcc = V4L2_PIX_FMT_H263,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
-		.name = "VC1",
-		.description = "VC-1 compressed format",
-		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
-		.name = "VC1 SP",
-		.description = "VC-1 compressed format G",
-		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
 		.name = "H264",
 		.description = "H264 compressed format",
 		.fourcc = V4L2_PIX_FMT_H264,
@@ -564,14 +466,6 @@
 		.defer_outputs = false,
 	},
 	{
-		.name = "H264_MVC",
-		.description = "H264_MVC compressed format",
-		.fourcc = V4L2_PIX_FMT_H264_MVC,
-		.get_frame_size = get_frame_size_compressed,
-		.type = OUTPUT_PORT,
-		.defer_outputs = false,
-	},
-	{
 		.name = "HEVC",
 		.description = "HEVC compressed format",
 		.fourcc = V4L2_PIX_FMT_HEVC,
@@ -826,10 +720,6 @@
 	}
 	hdev = inst->core->device;
 
-	rc = is_ctrl_valid_for_codec(inst, ctrl);
-	if (rc)
-		return rc;
-
 	/* Small helper macro for quickly getting a control and err checking */
 #define TRY_GET_CTRL(__ctrl_id) ({ \
 		struct v4l2_ctrl *__temp; \
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 2ec5155..ff28dd0 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -37,11 +37,6 @@
 #define MAX_HYBRID_HIER_P_LAYERS 6
 
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
-#define CODING V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY
-#define BITSTREAM_RESTRICT_ENABLED \
-	V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_ENABLED
-#define BITSTREAM_RESTRICT_DISABLED \
-	V4L2_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT_DISABLED
 #define MIN_TIME_RESOLUTION 1
 #define MAX_TIME_RESOLUTION 0xFFFFFF
 #define DEFAULT_TIME_RESOLUTION 0x7530
@@ -91,30 +86,8 @@
 	NULL
 };
 
-static const char *const h263_level[] = {
-	"1.0",
-	"2.0",
-	"3.0",
-	"4.0",
-	"4.5",
-	"5.0",
-	"6.0",
-	"7.0",
-};
-
-static const char *const h263_profile[] = {
-	"Baseline",
-	"H320 Coding",
-	"Backward Compatible",
-	"ISWV2",
-	"ISWV3",
-	"High Compression",
-	"Internet",
-	"Interlace",
-	"High Latency",
-};
-
 static const char *const hevc_tier_level[] = {
+	"Level unknown"
 	"Main Tier Level 1",
 	"Main Tier Level 2",
 	"Main Tier Level 2.1",
@@ -454,8 +427,8 @@
 		.name = "H264 Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
-		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_5_2,
-		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+		.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN,
+		.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN,
 		.menu_skip_mask = 0,
 	},
 	{
@@ -491,9 +464,9 @@
 		.name = "HEVC Tier and Level",
 		.type = V4L2_CTRL_TYPE_MENU,
 		.minimum = V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1,
-		.maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_5_2,
+		.maximum = V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN,
 		.default_value =
-			V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_1,
+			V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN,
 		.menu_skip_mask = ~(
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_1) |
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_2) |
@@ -513,7 +486,8 @@
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4) |
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_4_1) |
 		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5) |
-		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_1)
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_5_1) |
+		(1 << V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN)
 		),
 		.qmenu = hevc_tier_level,
 	},
@@ -795,72 +769,6 @@
 		.qmenu = NULL,
 	},
 	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE,
-		.name = "I-Frame X coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE,
-		.name = "I-Frame Y coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE,
-		.name = "P-Frame X coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE,
-		.name = "P-Frame Y coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE,
-		.name = "B-Frame X coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
-		.id = V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE,
-		.name = "B-Frame Y coordinate search range",
-		.type = V4L2_CTRL_TYPE_INTEGER,
-		.minimum = 4,
-		.maximum = 128,
-		.default_value = 4,
-		.step = 1,
-		.menu_skip_mask = 0,
-		.qmenu = NULL,
-	},
-	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS,
 		.name = "Set Hier B num layers",
 		.type = V4L2_CTRL_TYPE_INTEGER,
@@ -1279,7 +1187,7 @@
 	struct hal_ltr_use use_ltr;
 	struct hal_ltr_mark mark_ltr;
 	struct hal_hybrid_hierp hyb_hierp;
-	u32 hier_p_layers = 0, hier_b_layers = 0;
+	u32 hier_p_layers = 0;
 	int max_hierp_layers;
 	int baselayerid = 0;
 	struct hal_video_signal_info signal_info = {0};
@@ -1496,28 +1404,6 @@
 			temp_ctrl->val);
 		pdata = &h264_entropy_control;
 		break;
-	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
-		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL);
-
-		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
-		profile_level.profile = msm_comm_v4l2_to_hal(ctrl->id,
-						ctrl->val);
-		profile_level.level = msm_comm_v4l2_to_hal(
-				V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
-				temp_ctrl->val);
-		pdata = &profile_level;
-		break;
-	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
-		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE);
-
-		property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT;
-		profile_level.level = msm_comm_v4l2_to_hal(ctrl->id,
-							ctrl->val);
-		profile_level.profile = msm_comm_v4l2_to_hal(
-				V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
-				temp_ctrl->val);
-		pdata = &profile_level;
-		break;
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
 
@@ -1736,6 +1622,7 @@
 			enable.enable = 0;
 			break;
 		case V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME:
+			enable.enable = 1;
 		default:
 			rc = -ENOTSUPP;
 			break;
@@ -1856,16 +1743,6 @@
 		enable.enable = ctrl->val;
 		pdata = &enable;
 		break;
-	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS:
-		if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) {
-			dprintk(VIDC_ERR, "Hier B supported for HEVC only\n");
-			rc = -ENOTSUPP;
-			break;
-		}
-		property_id = HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS;
-		hier_b_layers = ctrl->val;
-		pdata = &hier_b_layers;
-		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE:
 		property_id = HAL_PARAM_VENC_HIER_P_HYBRID_MODE;
 		hyb_hierp.layers = ctrl->val;
@@ -2102,7 +1979,6 @@
 	struct v4l2_ext_control *control;
 	struct hfi_device *hdev;
 	struct hal_ltr_mode ltr_mode;
-	struct hal_vc1e_perf_cfg_type search_range = { {0} };
 	u32 property_id = 0, layer_id = MSM_VIDC_ALL_LAYER_ID;
 	void *pdata = NULL;
 	struct msm_vidc_capability *cap = NULL;
@@ -2157,36 +2033,6 @@
 			property_id = HAL_PARAM_VENC_LTRMODE;
 			pdata = &ltr_mode;
 			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_X_RANGE:
-			search_range.i_frame.x_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_Y_RANGE:
-			search_range.i_frame.y_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_X_RANGE:
-			search_range.p_frame.x_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_PFRAME_Y_RANGE:
-			search_range.p_frame.y_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_X_RANGE:
-			search_range.b_frame.x_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
-		case V4L2_CID_MPEG_VIDC_VIDEO_BFRAME_Y_RANGE:
-			search_range.b_frame.y_subsampled = control[i].value;
-			property_id = HAL_PARAM_VENC_SEARCH_RANGE;
-			pdata = &search_range;
-			break;
 		case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH:
 			sar.aspect_width = control[i].value;
 			property_id = HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 270fc31..114a702 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1820,14 +1820,12 @@
 
 	switch (ctrl->id) {
 
-	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
 	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
 	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
 	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
 		ctrl->val = inst->profile;
 	break;
 
-	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
 	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
 		ctrl->val = inst->level;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 4aaa525..5e49f42 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -52,8 +52,6 @@
 	"Extradata none",
 	"Extradata MB Quantization",
 	"Extradata Interlace Video",
-	"Extradata VC1 Framedisp",
-	"Extradata VC1 Seqdisp",
 	"Extradata timestamp",
 	"Extradata S3D Frame Packing",
 	"Extradata Frame Rate",
@@ -304,6 +302,8 @@
 			return HAL_H264_LEVEL_51;
 		case V4L2_MPEG_VIDEO_H264_LEVEL_5_2:
 			return HAL_H264_LEVEL_52;
+		case V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN:
+			return HAL_H264_LEVEL_UNKNOWN;
 		default:
 			goto unknown_value;
 		}
@@ -405,6 +405,8 @@
 			return HAL_HEVC_HIGH_TIER_LEVEL_6;
 		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_1:
 			return HAL_HEVC_HIGH_TIER_LEVEL_6_1;
+		case V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN:
+			return HAL_HEVC_TIER_LEVEL_UNKNOWN;
 		default:
 			goto unknown_value;
 		}
@@ -711,22 +713,13 @@
 	case V4L2_PIX_FMT_H264_MVC:
 		codec = HAL_VIDEO_CODEC_MVC;
 		break;
-	case V4L2_PIX_FMT_H263:
-		codec = HAL_VIDEO_CODEC_H263;
-		break;
+
 	case V4L2_PIX_FMT_MPEG1:
 		codec = HAL_VIDEO_CODEC_MPEG1;
 		break;
 	case V4L2_PIX_FMT_MPEG2:
 		codec = HAL_VIDEO_CODEC_MPEG2;
 		break;
-	case V4L2_PIX_FMT_MPEG4:
-		codec = HAL_VIDEO_CODEC_MPEG4;
-		break;
-	case V4L2_PIX_FMT_VC1_ANNEX_G:
-	case V4L2_PIX_FMT_VC1_ANNEX_L:
-		codec = HAL_VIDEO_CODEC_VC1;
-		break;
 	case V4L2_PIX_FMT_VP8:
 		codec = HAL_VIDEO_CODEC_VP8;
 		break;
@@ -762,9 +755,6 @@
 	case V4L2_PIX_FMT_NV12_TP10_UBWC:
 		format = HAL_COLOR_FORMAT_NV12_TP10_UBWC;
 		break;
-	case V4L2_PIX_FMT_RGB32:
-		format = HAL_COLOR_FORMAT_RGBA8888;
-		break;
 	default:
 		format = HAL_UNUSED_COLOR;
 		break;
@@ -2430,55 +2420,6 @@
 	put_inst(inst);
 }
 
-static void handle_seq_hdr_done(enum hal_command_response cmd, void *data)
-{
-	struct msm_vidc_cb_data_done *response = data;
-	struct msm_vidc_inst *inst;
-	struct vb2_buffer *vb;
-	struct vidc_hal_fbd *fill_buf_done;
-	struct vb2_v4l2_buffer *vbuf;
-
-	if (!response) {
-		dprintk(VIDC_ERR, "Invalid response from vidc_hal\n");
-		return;
-	}
-
-	inst = get_inst(get_vidc_core(response->device_id),
-			response->session_id);
-	if (!inst) {
-		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
-		return;
-	}
-
-	fill_buf_done = (struct vidc_hal_fbd *)&response->output_done;
-	vb = get_vb_from_device_addr(&inst->bufq[CAPTURE_PORT],
-				fill_buf_done->packet_buffer1);
-	if (!vb) {
-		dprintk(VIDC_ERR,
-				"Failed to find video buffer for seq_hdr_done: %pa\n",
-				&fill_buf_done->packet_buffer1);
-		goto err_seq_hdr_done;
-	}
-	vbuf = to_vb2_v4l2_buffer(vb);
-	vb->timestamp = 0;
-
-	vb->planes[0].bytesused = fill_buf_done->filled_len1;
-	vb->planes[0].data_offset = fill_buf_done->offset1;
-
-	vbuf->flags = V4L2_QCOM_BUF_FLAG_CODECCONFIG;
-
-	dprintk(VIDC_DBG, "Filled length = %d; offset = %d; flags %x\n",
-				vb->planes[0].bytesused,
-				vb->planes[0].data_offset,
-				vbuf->flags);
-	mutex_lock(&inst->bufq[CAPTURE_PORT].lock);
-	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-	mutex_unlock(&inst->bufq[CAPTURE_PORT].lock);
-
-err_seq_hdr_done:
-	put_inst(inst);
-}
-
 void handle_cmd_response(enum hal_command_response cmd, void *data)
 {
 	dprintk(VIDC_DBG, "Command response = %d\n", cmd);
@@ -2523,9 +2464,6 @@
 	case HAL_SESSION_FLUSH_DONE:
 		handle_session_flush(cmd, data);
 		break;
-	case HAL_SESSION_GET_SEQ_HDR_DONE:
-		handle_seq_hdr_done(cmd, data);
-		break;
 	case HAL_SYS_WATCHDOG_TIMEOUT:
 	case HAL_SYS_ERROR:
 		handle_sys_error(cmd, data);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 74e360e..58954f6 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3176,14 +3176,12 @@
 		case HAL_SESSION_RESUME_DONE:
 		case HAL_SESSION_SET_PROP_DONE:
 		case HAL_SESSION_GET_PROP_DONE:
-		case HAL_SESSION_PARSE_SEQ_HDR_DONE:
 		case HAL_SESSION_RELEASE_BUFFER_DONE:
 		case HAL_SESSION_RELEASE_RESOURCE_DONE:
 		case HAL_SESSION_PROPERTY_INFO:
 			session_id = &info->response.cmd.session_id;
 			break;
 		case HAL_SESSION_ERROR:
-		case HAL_SESSION_GET_SEQ_HDR_DONE:
 		case HAL_SESSION_ETB_DONE:
 		case HAL_SESSION_FTB_DONE:
 			session_id = &info->response.data.session_id;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 7caff53..2a833dc 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -73,8 +73,6 @@
 #define HFI_EXTRADATA_NONE					0x00000000
 #define HFI_EXTRADATA_MB_QUANTIZATION		0x00000001
 #define HFI_EXTRADATA_INTERLACE_VIDEO		0x00000002
-#define HFI_EXTRADATA_VC1_FRAMEDISP			0x00000003
-#define HFI_EXTRADATA_VC1_SEQDISP			0x00000004
 #define HFI_EXTRADATA_TIMESTAMP				0x00000005
 #define HFI_EXTRADATA_S3D_FRAME_PACKING		0x00000006
 #define HFI_EXTRADATA_FRAME_RATE			0x00000007
@@ -132,8 +130,6 @@
 	(HFI_PROPERTY_PARAM_OX_START + 0x001)
 #define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO	\
 	(HFI_PROPERTY_PARAM_OX_START + 0x002)
-#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG		\
-	(HFI_PROPERTY_PARAM_OX_START + 0x005)
 #define HFI_PROPERTY_PARAM_INDEX_EXTRADATA             \
 	(HFI_PROPERTY_PARAM_OX_START + 0x006)
 #define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA	\
@@ -175,10 +171,6 @@
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00C)
 #define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE   \
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x00D)
-#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA		\
-	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x011)
-#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA		\
-	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x012)
 #define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA			\
 	(HFI_PROPERTY_PARAM_VDEC_OX_START + 0x013)
 #define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA	\
@@ -206,8 +198,6 @@
 
 #define HFI_PROPERTY_CONFIG_VDEC_OX_START				\
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000)
-#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER	\
-	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x001)
 #define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING	\
 	(HFI_PROPERTY_CONFIG_VDEC_OX_START + 0x002)
 #define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP			\
@@ -279,14 +269,6 @@
 	u32 picture_type;
 };
 
-struct hfi_extra_data_header_config {
-	u32 type;
-	u32 buffer_type;
-	u32 version;
-	u32 port_index;
-	u32 client_extra_data_id;
-};
-
 struct hfi_mb_error_map {
 	u32 error_map_size;
 	u8 rg_error_map[1];
@@ -720,35 +702,6 @@
 	u8 rg_mb_qp[1];
 };
 
-struct hfi_extradata_vc1_pswnd {
-	u32 ps_wnd_h_offset;
-	u32 ps_wnd_v_offset;
-	u32 ps_wnd_width;
-	u32 ps_wnd_height;
-};
-
-struct hfi_extradata_vc1_framedisp_payload {
-	u32 res_pic;
-	u32 ref;
-	u32 range_map_present;
-	u32 range_map_y;
-	u32 range_map_uv;
-	u32 num_pan_scan_wnds;
-	struct hfi_extradata_vc1_pswnd rg_ps_wnd[1];
-};
-
-struct hfi_extradata_vc1_seqdisp_payload {
-	u32 prog_seg_frm;
-	u32 uv_sampling_fmt;
-	u32 color_fmt_flag;
-	u32 color_primaries;
-	u32 transfer_char;
-	u32 mat_coeff;
-	u32 aspect_ratio;
-	u32 aspect_horiz;
-	u32 aspect_vert;
-};
-
 struct hfi_extradata_timestamp_payload {
 	u32 time_stamp_low;
 	u32 time_stamp_high;
@@ -836,10 +789,6 @@
 	u32 aspect_width;
 	u32 aspect_height;
 };
-struct hfi_extradata_panscan_wndw_payload {
-	u32 num_window;
-	struct hfi_extradata_vc1_pswnd wnd[1];
-};
 
 struct hfi_extradata_frame_type_payload {
 	u32 frame_rate;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index eff16f2..8aa0bbb 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -98,8 +98,6 @@
 	HAL_EXTRADATA_NONE,
 	HAL_EXTRADATA_MB_QUANTIZATION,
 	HAL_EXTRADATA_INTERLACE_VIDEO,
-	HAL_EXTRADATA_VC1_FRAMEDISP,
-	HAL_EXTRADATA_VC1_SEQDISP,
 	HAL_EXTRADATA_TIMESTAMP,
 	HAL_EXTRADATA_S3D_FRAME_PACKING,
 	HAL_EXTRADATA_FRAME_RATE,
@@ -134,7 +132,6 @@
 	HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT,
 	HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
 	HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
-	HAL_PARAM_EXTRA_DATA_HEADER_CONFIG,
 	HAL_PARAM_INDEX_EXTRADATA,
 	HAL_PARAM_FRAME_SIZE,
 	HAL_CONFIG_REALTIME,
@@ -144,22 +141,16 @@
 	HAL_PARAM_VDEC_OUTPUT_ORDER,
 	HAL_PARAM_VDEC_PICTURE_TYPE_DECODE,
 	HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO,
-	HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
 	HAL_PARAM_VDEC_MULTI_STREAM,
 	HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT,
-	HAL_PARAM_DIVX_FORMAT,
 	HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING,
 	HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER,
 	HAL_CONFIG_VDEC_MB_ERROR_MAP,
 	HAL_CONFIG_VENC_REQUEST_IFRAME,
-	HAL_PARAM_VENC_MPEG4_SHORT_HEADER,
-	HAL_PARAM_VENC_MPEG4_AC_PREDICTION,
 	HAL_CONFIG_VENC_TARGET_BITRATE,
 	HAL_PARAM_PROFILE_LEVEL_CURRENT,
 	HAL_PARAM_VENC_H264_ENTROPY_CONTROL,
 	HAL_PARAM_VENC_RATE_CONTROL,
-	HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION,
-	HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION,
 	HAL_PARAM_VENC_H264_DEBLOCK_CONTROL,
 	HAL_PARAM_VENC_TEMPORAL_SPATIAL_TRADEOFF,
 	HAL_PARAM_VENC_SESSION_QP_RANGE,
@@ -190,7 +181,6 @@
 	HAL_PARAM_VDEC_NUM_CONCEALED_MB,
 	HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING,
 	HAL_PARAM_VENC_SLICE_DELIVERY_MODE,
-	HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING,
 	HAL_CONFIG_BUFFER_COUNT_ACTUAL,
 	HAL_CONFIG_VDEC_MULTI_STREAM,
 	HAL_PARAM_VENC_MULTI_SLICE_INFO,
@@ -204,12 +194,10 @@
 	HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
 	HAL_PARAM_BUFFER_ALLOC_MODE,
 	HAL_PARAM_VDEC_FRAME_ASSEMBLY,
-	HAL_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC,
 	HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY,
 	HAL_PARAM_VDEC_CONCEAL_COLOR,
 	HAL_PARAM_VDEC_SCS_THRESHOLD,
 	HAL_PARAM_GET_BUFFER_REQUIREMENTS,
-	HAL_PARAM_MVC_BUFFER_LAYOUT,
 	HAL_PARAM_VENC_LTRMODE,
 	HAL_CONFIG_VENC_MARKLTRFRAME,
 	HAL_CONFIG_VENC_USELTRFRAME,
@@ -221,7 +209,6 @@
 	HAL_PARAM_VPE_COLOR_SPACE_CONVERSION,
 	HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
 	HAL_CONFIG_VENC_PERF_MODE,
-	HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS,
 	HAL_PARAM_VDEC_NON_SECURE_OUTPUT2,
 	HAL_PARAM_VENC_HIER_P_HYBRID_MODE,
 	HAL_PARAM_VENC_MBI_STATISTICS_MODE,
@@ -232,7 +219,6 @@
 	HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO,
 	HAL_CONFIG_VDEC_ENTROPY,
 	HAL_PARAM_VENC_BITRATE_TYPE,
-	HAL_PARAM_VENC_H264_PIC_ORDER_CNT,
 	HAL_PARAM_VENC_LOW_LATENCY,
 	HAL_CONFIG_VENC_BLUR_RESOLUTION,
 	HAL_PARAM_VENC_H264_TRANSFORM_8x8,
@@ -289,31 +275,6 @@
 	HAL_UNUSED_CODEC = 0x10000000,
 };
 
-enum hal_h263_profile {
-	HAL_H263_PROFILE_BASELINE           = 0x00000001,
-	HAL_H263_PROFILE_H320CODING         = 0x00000002,
-	HAL_H263_PROFILE_BACKWARDCOMPATIBLE = 0x00000004,
-	HAL_H263_PROFILE_ISWV2              = 0x00000008,
-	HAL_H263_PROFILE_ISWV3              = 0x00000010,
-	HAL_H263_PROFILE_HIGHCOMPRESSION    = 0x00000020,
-	HAL_H263_PROFILE_INTERNET           = 0x00000040,
-	HAL_H263_PROFILE_INTERLACE          = 0x00000080,
-	HAL_H263_PROFILE_HIGHLATENCY        = 0x00000100,
-	HAL_UNUSED_H263_PROFILE = 0x10000000,
-};
-
-enum hal_h263_level {
-	HAL_H263_LEVEL_10 = 0x00000001,
-	HAL_H263_LEVEL_20 = 0x00000002,
-	HAL_H263_LEVEL_30 = 0x00000004,
-	HAL_H263_LEVEL_40 = 0x00000008,
-	HAL_H263_LEVEL_45 = 0x00000010,
-	HAL_H263_LEVEL_50 = 0x00000020,
-	HAL_H263_LEVEL_60 = 0x00000040,
-	HAL_H263_LEVEL_70 = 0x00000080,
-	HAL_UNUSED_H263_LEVEL = 0x10000000,
-};
-
 enum hal_mpeg2_profile {
 	HAL_MPEG2_PROFILE_SIMPLE  = 0x00000001,
 	HAL_MPEG2_PROFILE_MAIN    = 0x00000002,
@@ -332,44 +293,6 @@
 	HAL_UNUSED_MEPG2_LEVEL = 0x10000000,
 };
 
-enum hal_mpeg4_profile {
-	HAL_MPEG4_PROFILE_SIMPLE           = 0x00000001,
-	HAL_MPEG4_PROFILE_ADVANCEDSIMPLE   = 0x00000002,
-	HAL_MPEG4_PROFILE_CORE             = 0x00000004,
-	HAL_MPEG4_PROFILE_MAIN             = 0x00000008,
-	HAL_MPEG4_PROFILE_NBIT             = 0x00000010,
-	HAL_MPEG4_PROFILE_SCALABLETEXTURE  = 0x00000020,
-	HAL_MPEG4_PROFILE_SIMPLEFACE       = 0x00000040,
-	HAL_MPEG4_PROFILE_SIMPLEFBA        = 0x00000080,
-	HAL_MPEG4_PROFILE_BASICANIMATED    = 0x00000100,
-	HAL_MPEG4_PROFILE_HYBRID           = 0x00000200,
-	HAL_MPEG4_PROFILE_ADVANCEDREALTIME = 0x00000400,
-	HAL_MPEG4_PROFILE_CORESCALABLE     = 0x00000800,
-	HAL_MPEG4_PROFILE_ADVANCEDCODING   = 0x00001000,
-	HAL_MPEG4_PROFILE_ADVANCEDCORE     = 0x00002000,
-	HAL_MPEG4_PROFILE_ADVANCEDSCALABLE = 0x00004000,
-	HAL_MPEG4_PROFILE_SIMPLESCALABLE   = 0x00008000,
-	HAL_UNUSED_MPEG4_PROFILE = 0x10000000,
-};
-
-enum hal_mpeg4_level {
-	HAL_MPEG4_LEVEL_0  = 0x00000001,
-	HAL_MPEG4_LEVEL_0b = 0x00000002,
-	HAL_MPEG4_LEVEL_1  = 0x00000004,
-	HAL_MPEG4_LEVEL_2  = 0x00000008,
-	HAL_MPEG4_LEVEL_3  = 0x00000010,
-	HAL_MPEG4_LEVEL_4  = 0x00000020,
-	HAL_MPEG4_LEVEL_4a = 0x00000040,
-	HAL_MPEG4_LEVEL_5  = 0x00000080,
-	HAL_MPEG4_LEVEL_VENDOR_START_UNUSED = 0x7F000000,
-	HAL_MPEG4_LEVEL_6  = 0x7F000001,
-	HAL_MPEG4_LEVEL_7  = 0x7F000002,
-	HAL_MPEG4_LEVEL_8  = 0x7F000003,
-	HAL_MPEG4_LEVEL_9  = 0x7F000004,
-	HAL_MPEG4_LEVEL_3b = 0x7F000005,
-	HAL_UNUSED_MPEG4_LEVEL = 0x10000000,
-};
-
 enum hal_h264_profile {
 	HAL_H264_PROFILE_BASELINE = 0x00000001,
 	HAL_H264_PROFILE_MAIN     = 0x00000002,
@@ -384,6 +307,7 @@
 };
 
 enum hal_h264_level {
+	HAL_H264_LEVEL_UNKNOWN = 0x00000000,
 	HAL_H264_LEVEL_1  = 0x00000001,
 	HAL_H264_LEVEL_1b = 0x00000002,
 	HAL_H264_LEVEL_11 = 0x00000004,
@@ -401,7 +325,6 @@
 	HAL_H264_LEVEL_5  = 0x00004000,
 	HAL_H264_LEVEL_51 = 0x00008000,
 	HAL_H264_LEVEL_52 = 0x00010000,
-	HAL_UNUSED_H264_LEVEL = 0x10000000,
 };
 
 enum hal_hevc_profile {
@@ -412,6 +335,7 @@
 };
 
 enum hal_hevc_level {
+	HAL_HEVC_TIER_LEVEL_UNKNOWN     = 0x00000000,
 	HAL_HEVC_MAIN_TIER_LEVEL_1      = 0x10000001,
 	HAL_HEVC_MAIN_TIER_LEVEL_2      = 0x10000002,
 	HAL_HEVC_MAIN_TIER_LEVEL_2_1    = 0x10000004,
@@ -438,7 +362,6 @@
 	HAL_HEVC_HIGH_TIER_LEVEL_6      = 0x20000400,
 	HAL_HEVC_HIGH_TIER_LEVEL_6_1    = 0x20000800,
 	HAL_HEVC_HIGH_TIER_LEVEL_6_2    = 0x20001000,
-	HAL_UNUSED_HEVC_TIER_LEVEL      = 0x80000000,
 };
 
 enum hal_hevc_tier {
@@ -457,66 +380,6 @@
 	HAL_VPX_PROFILE_UNUSED = 0x10000000,
 };
 
-enum hal_vc1_profile {
-	HAL_VC1_PROFILE_SIMPLE   = 0x00000001,
-	HAL_VC1_PROFILE_MAIN     = 0x00000002,
-	HAL_VC1_PROFILE_ADVANCED = 0x00000004,
-	HAL_UNUSED_VC1_PROFILE = 0x10000000,
-};
-
-enum hal_vc1_level {
-	HAL_VC1_LEVEL_LOW    = 0x00000001,
-	HAL_VC1_LEVEL_MEDIUM = 0x00000002,
-	HAL_VC1_LEVEL_HIGH   = 0x00000004,
-	HAL_VC1_LEVEL_0      = 0x00000008,
-	HAL_VC1_LEVEL_1      = 0x00000010,
-	HAL_VC1_LEVEL_2      = 0x00000020,
-	HAL_VC1_LEVEL_3      = 0x00000040,
-	HAL_VC1_LEVEL_4      = 0x00000080,
-	HAL_UNUSED_VC1_LEVEL = 0x10000000,
-};
-
-enum hal_divx_format {
-	HAL_DIVX_FORMAT_4,
-	HAL_DIVX_FORMAT_5,
-	HAL_DIVX_FORMAT_6,
-	HAL_UNUSED_DIVX_FORMAT = 0x10000000,
-};
-
-enum hal_divx_profile {
-	HAL_DIVX_PROFILE_QMOBILE  = 0x00000001,
-	HAL_DIVX_PROFILE_MOBILE   = 0x00000002,
-	HAL_DIVX_PROFILE_MT       = 0x00000004,
-	HAL_DIVX_PROFILE_HT       = 0x00000008,
-	HAL_DIVX_PROFILE_HD       = 0x00000010,
-	HAL_UNUSED_DIVX_PROFILE = 0x10000000,
-};
-
-enum hal_mvc_profile {
-	HAL_MVC_PROFILE_STEREO_HIGH  = 0x00001000,
-	HAL_UNUSED_MVC_PROFILE = 0x10000000,
-};
-
-enum hal_mvc_level {
-	HAL_MVC_LEVEL_1  = 0x00000001,
-	HAL_MVC_LEVEL_1b = 0x00000002,
-	HAL_MVC_LEVEL_11 = 0x00000004,
-	HAL_MVC_LEVEL_12 = 0x00000008,
-	HAL_MVC_LEVEL_13 = 0x00000010,
-	HAL_MVC_LEVEL_2  = 0x00000020,
-	HAL_MVC_LEVEL_21 = 0x00000040,
-	HAL_MVC_LEVEL_22 = 0x00000080,
-	HAL_MVC_LEVEL_3  = 0x00000100,
-	HAL_MVC_LEVEL_31 = 0x00000200,
-	HAL_MVC_LEVEL_32 = 0x00000400,
-	HAL_MVC_LEVEL_4  = 0x00000800,
-	HAL_MVC_LEVEL_41 = 0x00001000,
-	HAL_MVC_LEVEL_42 = 0x00002000,
-	HAL_MVC_LEVEL_5  = 0x00004000,
-	HAL_MVC_LEVEL_51 = 0x00008000,
-	HAL_UNUSED_MVC_LEVEL = 0x10000000,
-};
-
 struct hal_frame_rate {
 	enum hal_buffer buffer_type;
 	u32 frame_rate;
@@ -585,14 +448,6 @@
 	struct hal_uncompressed_plane_constraints rg_plane_format[1];
 };
 
-struct hal_extra_data_header_config {
-	u32 type;
-	enum hal_buffer buffer_type;
-	u32 version;
-	u32 port_index;
-	u32 client_extradata_id;
-};
-
 struct hal_frame_size {
 	enum hal_buffer buffer_type;
 	u32 width;
@@ -718,14 +573,6 @@
 	HAL_UNUSED_RC = 0x10000000,
 };
 
-struct hal_mpeg4_time_resolution {
-	u32 time_increment_resolution;
-};
-
-struct hal_mpeg4_header_extension {
-	u32 header_extension;
-};
-
 enum hal_h264_db_mode {
 	HAL_H264_DB_MODE_DISABLE,
 	HAL_H264_DB_MODE_SKIP_SLICE_BOUNDARY,
@@ -946,12 +793,6 @@
 	HAL_UNUSED_BUFFER_LAYOUT = 0x10000000,
 };
 
-struct hal_mvc_buffer_layout {
-	enum hal_buffer_layout_type layout_type;
-	u32 bright_view_first;
-	u32 ngap;
-};
-
 struct hal_aspect_ratio {
 	u32 aspect_width;
 	u32 aspect_height;
@@ -985,13 +826,6 @@
 	u32 enable;
 };
 
-struct hal_vc1e_perf_cfg_type {
-	struct {
-		u32 x_subsampled;
-		u32 y_subsampled;
-	} i_frame, p_frame, b_frame;
-};
-
 struct hal_vpe_color_space_conversion {
 	u32 csc_matrix[HAL_MAX_MATRIX_COEFFS];
 	u32 csc_bias[HAL_MAX_BIAS_COEFFS];
@@ -1105,7 +939,6 @@
 enum ltr_mode {
 	HAL_LTR_MODE_DISABLE,
 	HAL_LTR_MODE_MANUAL,
-	HAL_LTR_MODE_PERIODIC,
 };
 
 struct hal_ltr_mode {
@@ -1149,7 +982,6 @@
 	struct hal_uncompressed_plane_constraints plane_constraints;
 	struct hal_uncompressed_plane_actual_constraints_info
 						plane_constraints_info;
-	struct hal_extra_data_header_config extra_data_header_config;
 	struct hal_frame_size frame_size;
 	struct hal_enable enable;
 	struct hal_buffer_count_actual buffer_count_actual;
@@ -1162,8 +994,6 @@
 	struct hal_bitrate bitrate;
 	struct hal_profile_level profile_level;
 	struct hal_profile_level_supported profile_level_supported;
-	struct hal_mpeg4_time_resolution mpeg4_time_resolution;
-	struct hal_mpeg4_header_extension mpeg4_header_extension;
 	struct hal_h264_db_control h264_db_control;
 	struct hal_temporal_spatial_tradeoff temporal_spatial_tradeoff;
 	struct hal_quantization quantization;
@@ -1227,8 +1057,6 @@
 	HAL_SESSION_RESUME_DONE,
 	HAL_SESSION_SET_PROP_DONE,
 	HAL_SESSION_GET_PROP_DONE,
-	HAL_SESSION_PARSE_SEQ_HDR_DONE,
-	HAL_SESSION_GET_SEQ_HDR_DONE,
 	HAL_SESSION_RELEASE_BUFFER_DONE,
 	HAL_SESSION_RELEASE_RESOURCE_DONE,
 	HAL_SESSION_PROPERTY_INFO,
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index dc64ad2..0d73410 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -81,7 +81,6 @@
 #define HFI_VIDEO_CODEC_VP8				0x00001000
 #define HFI_VIDEO_CODEC_HEVC				0x00002000
 #define HFI_VIDEO_CODEC_VP9				0x00004000
-#define HFI_VIDEO_CODEC_HEVC_HYBRID			0x80000000
 
 #define HFI_PROFILE_UNKNOWN					0x00000000
 #define HFI_H264_PROFILE_BASELINE			0x00000001
@@ -214,8 +213,6 @@
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x00C)
 #define  HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED            \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
-#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT \
-	(HFI_PROPERTY_PARAM_COMMON_START + 0x00F)
 #define  HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED	    \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
 
@@ -281,8 +278,6 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
 #define  HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
-#define HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC \
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x021)
 #define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
 #define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY \
@@ -549,7 +544,6 @@
 
 #define HFI_LTR_MODE_DISABLE	0x0
 #define HFI_LTR_MODE_MANUAL		0x1
-#define HFI_LTR_MODE_PERIODIC	0x2
 
 struct hfi_ltr_mode {
 	u32 ltr_mode;
@@ -744,7 +738,6 @@
 	u32 close_gop;
 	u32 h264_constrain_intra_pred;
 	u32 h264_transform_8x8_flag;
-	u32 mpeg4_qpel_enable;
 	u32 multi_refp_en;
 	u32 qmatrix_en;
 	u8 vpp_info_packet_mode;
@@ -786,15 +779,6 @@
 	u32 type;
 };
 
-#define HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM  (0)
-#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE  (1)
-#define HFI_MVC_BUFFER_LAYOUT_SEQ         (2)
-struct hfi_mvc_buffer_layout_descp_type {
-	u32    layout_type;
-	u32    bright_view_first;
-	u32    ngap;
-};
-
 
 #define HFI_CMD_SYS_COMMON_START			\
 (HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + HFI_CMD_START_OFFSET \
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index ce5a7dc..5818986 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1322,6 +1322,8 @@
 					descr = "Y/CbCr 4:2:0 P10"; break;
 	case V4L2_PIX_FMT_NV12_TP10_UBWC:
 					descr = "Y/CbCr 4:2:0 TP10 UBWC"; break;
+	case V4L2_PIX_FMT_NV12_P010_UBWC:
+					descr = "Y/CbCr 4:2:0 P010 UBWC"; break;
 
 	default:
 		/* Compressed formats */
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index daad32f..36d9d69 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -37,3 +37,13 @@
 	  about re-trying SD init requests. This can be a useful
 	  work-around for buggy controllers and hardware. Enable
 	  if you are experiencing issues with SD detection.
+
+config MMC_CLKGATE
+	bool "MMC host clock gating"
+	help
+	  This will attempt to aggressively gate the clock to the MMC card.
+	  This saves power by gating off the clock logic and avoiding bus
+	  noise when the MMC card is not in use. Your host driver has to
+	  support handling this in order for it to be of any use.
+
+	  If unsure, say N.
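
The help text above implies a contract for host drivers: with MMC_CLKGATE enabled, the core signals gating purely through set_ios(), passing ios->clock == 0 to gate the block clock and the previously used frequency to ungate it (see mmc_gate_clock()/mmc_ungate_clock() in core.c below). A minimal sketch of a set_ios() callback honouring that contract; the example_host structure, its block_clk/clk_enabled fields and the use of the common clk API are illustrative assumptions, not part of this patch:

#include <linux/clk.h>
#include <linux/mmc/host.h>

/* Illustrative sketch only; not part of this patch. */
struct example_host {
	struct clk *block_clk;	/* controller block/bus clock */
	bool clk_enabled;
};

static void example_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct example_host *host = mmc_priv(mmc);

	if (ios->clock == 0) {
		/* Core is gating the clock between requests */
		if (host->clk_enabled) {
			clk_disable_unprepare(host->block_clk);
			host->clk_enabled = false;
		}
	} else {
		clk_set_rate(host->block_clk, ios->clock);
		if (!host->clk_enabled) {
			clk_prepare_enable(host->block_clk);
			host->clk_enabled = true;
		}
	}
	/* bus width, timing and power handling omitted */
}
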
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 40ddc3e..e7af954 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -226,6 +226,8 @@
 
 		if (mrq->done)
 			mrq->done(mrq);
+
+		mmc_host_clk_release(host);
 	}
 }
 
@@ -340,6 +342,7 @@
 			mrq->stop->mrq = mrq;
 		}
 	}
+	mmc_host_clk_hold(host);
 	led_trigger_event(host->led, LED_FULL);
 	__mmc_start_request(host, mrq);
 
@@ -634,8 +637,11 @@
 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
 		 bool is_first_req)
 {
-	if (host->ops->pre_req)
+	if (host->ops->pre_req) {
+		mmc_host_clk_hold(host);
 		host->ops->pre_req(host, mrq, is_first_req);
+		mmc_host_clk_release(host);
+	}
 }
 
 /**
@@ -650,8 +656,11 @@
 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
 			 int err)
 {
-	if (host->ops->post_req)
+	if (host->ops->post_req) {
+		mmc_host_clk_hold(host);
 		host->ops->post_req(host, mrq, err);
+		mmc_host_clk_release(host);
+	}
 }
 
 /**
@@ -949,9 +958,9 @@
 		unsigned int timeout_us, limit_us;
 
 		timeout_us = data->timeout_ns / 1000;
-		if (card->host->ios.clock)
+		if (mmc_host_clk_rate(card->host))
 			timeout_us += data->timeout_clks * 1000 /
-				(card->host->ios.clock / 1000);
+				(mmc_host_clk_rate(card->host) / 1000);
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
@@ -1149,6 +1158,8 @@
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 1 << ios->bus_width, ios->timing);
 
+	if (ios->clock > 0)
+		mmc_set_ungated(host);
 	host->ops->set_ios(host, ios);
 }
 
@@ -1157,15 +1168,17 @@
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
+	mmc_host_clk_hold(host);
 	host->ios.chip_select = mode;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
 	WARN_ON(hz && hz < host->f_min);
 
@@ -1176,6 +1189,68 @@
 	mmc_set_ios(host);
 }
 
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+	mmc_host_clk_hold(host);
+	__mmc_set_clock(host, hz);
+	mmc_host_clk_release(host);
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_old = host->ios.clock;
+	host->ios.clock = 0;
+	host->clk_gated = true;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+	/*
+	 * We should previously have gated the clock, so the clock shall
+	 * be 0 here! The clock may however be 0 during initialization,
+	 * when some request operations are performed before setting
+	 * the frequency. When ungate is requested in that situation
+	 * we just ignore the call.
+	 */
+	if (host->clk_old) {
+		BUG_ON(host->ios.clock);
+		/* This call will also set host->clk_gated to false */
+		__mmc_set_clock(host, host->clk_old);
+	}
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/*
+	 * We've been given a new frequency while the clock is gated,
+	 * so make sure we regard this as ungating it.
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_gated = false;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
 int mmc_execute_tuning(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
@@ -1190,7 +1265,9 @@
 	else
 		opcode = MMC_SEND_TUNING_BLOCK;
 
+	mmc_host_clk_hold(host);
 	err = host->ops->execute_tuning(host, opcode);
+	mmc_host_clk_release(host);
 
 	if (err)
 		pr_err("%s: tuning execution failed: %d\n",
@@ -1206,8 +1283,10 @@
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
+	mmc_host_clk_hold(host);
 	host->ios.bus_mode = mode;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1215,8 +1294,10 @@
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
+	mmc_host_clk_hold(host);
 	host->ios.bus_width = width;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1671,8 +1752,11 @@
 	int old_signal_voltage = host->ios.signal_voltage;
 
 	host->ios.signal_voltage = signal_voltage;
-	if (host->ops->start_signal_voltage_switch)
+	if (host->ops->start_signal_voltage_switch) {
+		mmc_host_clk_hold(host);
 		err = host->ops->start_signal_voltage_switch(host, &host->ios);
+		mmc_host_clk_release(host);
+	}
 
 	if (err)
 		host->ios.signal_voltage = old_signal_voltage;
@@ -1706,17 +1790,20 @@
 		pr_warn("%s: cannot verify signal voltage switch\n",
 			mmc_hostname(host));
 
+	mmc_host_clk_hold(host);
+
 	cmd.opcode = SD_SWITCH_VOLTAGE;
 	cmd.arg = 0;
 	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
 	err = mmc_wait_for_cmd(host, &cmd, 0);
 	if (err)
-		return err;
+		goto err_command;
 
-	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
-		return -EIO;
-
+	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
+		err = -EIO;
+		goto err_command;
+	}
 	/*
 	 * The card should drive cmd and dat[0:3] low immediately
 	 * after the response of cmd11, but wait 1 ms to be sure
@@ -1765,6 +1852,9 @@
 		mmc_power_cycle(host, ocr);
 	}
 
+err_command:
+	mmc_host_clk_release(host);
+
 	return err;
 }
 
@@ -1773,8 +1863,10 @@
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
+	mmc_host_clk_hold(host);
 	host->ios.timing = timing;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 /*
@@ -1782,8 +1874,10 @@
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
+	mmc_host_clk_hold(host);
 	host->ios.drv_type = drv_type;
 	mmc_set_ios(host);
+	mmc_host_clk_release(host);
 }
 
 int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -1791,6 +1885,7 @@
 {
 	struct mmc_host *host = card->host;
 	int host_drv_type = SD_DRIVER_TYPE_B;
+	int drive_strength;
 
 	*drv_type = 0;
 
@@ -1813,10 +1908,14 @@
 	 * information and let the hardware specific code
 	 * return what is possible given the options
 	 */
-	return host->ops->select_drive_strength(card, max_dtr,
-						host_drv_type,
-						card_drv_type,
-						drv_type);
+	mmc_host_clk_hold(host);
+	drive_strength = host->ops->select_drive_strength(card, max_dtr,
+							  host_drv_type,
+							  card_drv_type,
+							  drv_type);
+	mmc_host_clk_release(host);
+
+	return drive_strength;
 }
 
 /*
@@ -1835,6 +1934,8 @@
 	if (host->ios.power_mode == MMC_POWER_ON)
 		return;
 
+	mmc_host_clk_hold(host);
+
 	mmc_pwrseq_pre_power_on(host);
 
 	host->ios.vdd = fls(ocr) - 1;
@@ -1868,6 +1969,8 @@
 	 * time required to reach a stable voltage.
 	 */
 	mmc_delay(10);
+
+	mmc_host_clk_release(host);
 }
 
 void mmc_power_off(struct mmc_host *host)
@@ -1875,6 +1978,8 @@
 	if (host->ios.power_mode == MMC_POWER_OFF)
 		return;
 
+	mmc_host_clk_hold(host);
+
 	mmc_pwrseq_power_off(host);
 
 	host->ios.clock = 0;
@@ -1890,6 +1995,8 @@
 	 * can be successfully turned on again.
 	 */
 	mmc_delay(1);
+
+	mmc_host_clk_release(host);
 }
 
 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
@@ -2103,7 +2210,7 @@
 		 */
 		timeout_clks <<= 1;
 		timeout_us += (timeout_clks * 1000) /
-			      (card->host->ios.clock / 1000);
+			      (mmc_host_clk_rate(card->host) / 1000);
 
 		erase_timeout = timeout_us / 1000;
 
@@ -2626,7 +2733,9 @@
 {
 	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
 		return;
+	mmc_host_clk_hold(host);
 	host->ops->hw_reset(host);
+	mmc_host_clk_release(host);
 }
 
 int mmc_hw_reset(struct mmc_host *host)
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 0fa86a2..c975c7a 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -40,6 +40,9 @@
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index c8451ce..bf0f6ce 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -256,6 +256,11 @@
 			&mmc_clock_fops))
 		goto err_node;
 
+#ifdef CONFIG_MMC_CLKGATE
+	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+				root, &host->clk_delay))
+		goto err_node;
+#endif
 #ifdef CONFIG_FAIL_MMC_REQUEST
 	if (fail_request)
 		setup_fault_attr(&fail_default_attr, fail_request);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 348b58b..f18105f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -58,6 +58,246 @@
 	class_unregister(&mmc_host_class);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clkgate_delay = value;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return count;
+}
+
+/*
+ * Enabling clock gating makes the core call out to the host once to
+ * ungate and once to gate the clock around each request or card
+ * operation, however those are interleaved. The driver will see this
+ * through set_ios() operations with the ios.clock field set to 0 to
+ * gate (disable) the block clock, and to the old frequency to enable
+ * it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+	unsigned long tick_ns;
+	unsigned long freq = host->ios.clock;
+	unsigned long flags;
+
+	if (!freq) {
+		pr_debug("%s: frequency set to 0 in disable function, "
+			 "this means the clock is already disabled.\n",
+			 mmc_hostname(host));
+		return;
+	}
+	/*
+	 * New requests may have appeared while we were scheduling,
+	 * then there is no reason to delay the check before
+	 * clk_disable().
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+
+	/*
+	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
+	 * to disable the MCI block clock. The reference count may have
+	 * gone up again after this delay due to rescheduling!
+	 */
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		tick_ns = DIV_ROUND_UP(1000000000, freq);
+		ndelay(host->clk_delay * tick_ns);
+	} else {
+		/* New users appeared while waiting for this work */
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		return;
+	}
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		/* This will set host->ios.clock to 0 */
+		mmc_gate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+	}
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+	struct mmc_host *host = container_of(work, struct mmc_host,
+					      clk_gate_work.work);
+
+	mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ *	mmc_host_clk_hold - ungate hardware MCI clocks
+ *	@host: host to ungate.
+ *
+ *	Makes sure the host ios.clock is restored to a non-zero value
+ *	past this call.	Increase clock reference count and ungate clock
+ *	if we're the first user.
+ */
+void mmc_host_clk_hold(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/* cancel any clock gating work scheduled by mmc_host_clk_release() */
+	cancel_delayed_work_sync(&host->clk_gate_work);
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		mmc_ungate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+	}
+	host->clk_requests++;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_may_gate_card - check if this card may be gated
+ *	@card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	/* If there is no card we may gate it */
+	if (!card)
+		return true;
+	/*
+	 * Don't gate SDIO cards! These need to be clocked at all times
+	 * since they may be independent systems generating interrupts
+	 * and other events. The clock requests counter from the core will
+	 * go down to zero since the core does not need it, but we will not
+	 * gate the clock, because there is somebody out there that may still
+	 * be using it.
+	 */
+	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
+}
+
+/**
+ *	mmc_host_clk_release - gate off hardware MCI clocks
+ *	@host: host to gate.
+ *
+ *	Calls the host driver with ios.clock set to zero as often as possible
+ *	in order to gate off hardware MCI clocks. Decrease clock reference
+ *	count and schedule disabling of clock.
+ */
+void mmc_host_clk_release(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_requests--;
+	if (mmc_host_may_gate_card(host->card) &&
+	    !host->clk_requests)
+		schedule_delayed_work(&host->clk_gate_work,
+				      msecs_to_jiffies(host->clkgate_delay));
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ *	mmc_host_clk_rate - get current clock frequency setting
+ *	@host: host to get the clock frequency for.
+ *
+ *	Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	unsigned long freq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated)
+		freq = host->clk_old;
+	else
+		freq = host->ios.clock;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return freq;
+}
+
+/**
+ *	mmc_host_clk_init - set up clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+	host->clk_requests = 0;
+	/* Hold MCI clock for 8 cycles by default */
+	host->clk_delay = 8;
+	/*
+	 * Default clock gating delay is 0ms to avoid wasting power.
+	 * This value can be tuned by writing to the clkgate_delay sysfs entry.
+	 */
+	host->clkgate_delay = 0;
+	host->clk_gated = false;
+	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	spin_lock_init(&host->clk_lock);
+	mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_clk_exit - shut down clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+	/*
+	 * Wait for any outstanding gate and then make sure we're
+	 * ungated before exiting.
+	 */
+	if (cancel_delayed_work_sync(&host->clk_gate_work))
+		mmc_host_clk_gate_delayed(host);
+	if (host->clk_gated)
+		mmc_host_clk_hold(host);
+	/* There should be only one user now */
+	WARN_ON(host->clk_requests > 1);
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+	host->clkgate_delay_attr.show = clkgate_delay_show;
+	host->clkgate_delay_attr.store = clkgate_delay_store;
+	sysfs_attr_init(&host->clkgate_delay_attr.attr);
+	host->clkgate_delay_attr.attr.name = "clkgate_delay";
+	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+		pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+				mmc_hostname(host));
+}
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
+#endif
+
 void mmc_retune_enable(struct mmc_host *host)
 {
 	host->can_retune = 1;
@@ -382,6 +622,8 @@
 		return NULL;
 	}
 
+	mmc_host_clk_init(host);
+
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -427,6 +669,7 @@
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_host_debugfs(host);
 #endif
+	mmc_host_clk_sysfs_init(host);
 
 #ifdef CONFIG_BLOCK
 	mmc_latency_hist_sysfs_init(host);
@@ -466,6 +709,8 @@
 	device_del(&host->class_dev);
 
 	led_trigger_unregister_simple(host->led);
+
+	mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
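
Two delays control gating in the host.c code above: mmc_host_clk_release() schedules the gating work clkgate_delay milliseconds out (0 by default, tunable through the clkgate_delay sysfs attribute), and mmc_host_clk_gate_delayed() then busy-waits clk_delay bus cycles (8 by default) before gating, one cycle being DIV_ROUND_UP(1000000000, freq) nanoseconds. A standalone, purely illustrative C snippet of that arithmetic:

#include <stdio.h>

/* Illustrative only: mirrors the tick calculation in
 * mmc_host_clk_gate_delayed() for a few common bus frequencies. */
int main(void)
{
	const unsigned long clk_delay = 8;	/* default from mmc_host_clk_init() */
	const unsigned long freqs[] = { 400000, 25000000, 52000000 };
	size_t i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++) {
		unsigned long tick_ns =
			(1000000000UL + freqs[i] - 1) / freqs[i];
		printf("%9lu Hz: tick %4lu ns -> ndelay(%lu)\n",
		       freqs[i], tick_ns, clk_delay * tick_ns);
	}
	return 0;
}

At the 400 kHz initialization frequency the wait is about 20 us; at 25-52 MHz it drops to a few hundred nanoseconds, so the busy-wait is negligible next to the clkgate_delay deferral.
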
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5aa3f09..56e6355 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2096,11 +2096,13 @@
 
 	if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
 	     mmc_can_reset(card)) {
+		mmc_host_clk_hold(host);
 		/* If the card accept RST_n signal, send it. */
 		mmc_set_clock(host, host->f_init);
 		host->ops->hw_reset(host);
 		/* Set initial state and call mmc_set_ios */
 		mmc_set_initial_state(host);
+		mmc_host_clk_release(host);
 	} else {
 		/* Do a brute force power cycle */
 		mmc_power_cycle(host, card->ocr);
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index ca9cade..4e65ea5 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -35,7 +35,25 @@
 #define SDIO_DEVICE_ID_MARVELL_8797_F0	0x9128
 #endif
 
+/*
+ * This hook just adds a quirk for all sdio devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+	if (mmc_card_sdio(card))
+		card->quirks |= data;
+}
+
 static const struct mmc_fixup mmc_fixup_methods[] = {
+	/*
+	 * By default SDIO devices are considered CLK_GATING broken;
+	 * good cards will be whitelisted as they are tested.
+	 */
+	SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
+		   add_quirk_for_sdio_devices,
+		   MMC_QUIRK_BROKEN_CLK_GATING),
+
+	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
 	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
 		   add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
 
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index a0aa64e..60542b2 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -838,7 +838,9 @@
 	if (!host->ops->get_ro)
 		return -1;
 
+	mmc_host_clk_hold(host);
 	ro = host->ops->get_ro(host);
+	mmc_host_clk_release(host);
 
 	return ro;
 }
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index b5ec3c8..8e10bdc 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -976,10 +976,13 @@
 	}
 
 	if (!err && host->sdio_irqs) {
-		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
+		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
 			wake_up_process(host->sdio_irq_thread);
-		else if (host->caps & MMC_CAP_SDIO_IRQ)
+		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
+		}
 	}
 
 	mmc_release_host(host);
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 91bbbfb..09cc67d 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -168,15 +168,21 @@
 		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (host->caps & MMC_CAP_SDIO_IRQ)
+		if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
+		}
 		if (!kthread_should_stop())
 			schedule_timeout(period);
 		set_current_state(TASK_RUNNING);
 	} while (!kthread_should_stop());
 
-	if (host->caps & MMC_CAP_SDIO_IRQ)
+	if (host->caps & MMC_CAP_SDIO_IRQ) {
+		mmc_host_clk_hold(host);
 		host->ops->enable_sdio_irq(host, 0);
+		mmc_host_clk_release(host);
+	}
 
 	pr_debug("%s: IRQ thread exiting with code %d\n",
 		 mmc_hostname(host), ret);
@@ -202,7 +208,9 @@
 				return err;
 			}
 		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 1);
+			mmc_host_clk_release(host);
 		}
 	}
 
@@ -221,7 +229,9 @@
 			atomic_set(&host->sdio_irq_thread_abort, 1);
 			kthread_stop(host->sdio_irq_thread);
 		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+			mmc_host_clk_hold(host);
 			host->ops->enable_sdio_irq(host, 0);
+			mmc_host_clk_release(host);
 		}
 	}
 
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 410a55b..1cfd7f9 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -28,13 +28,9 @@
 #include "sdhci-pltfm.h"
 #include <linux/of.h>
 
-#define SDHCI_ARASAN_CLK_CTRL_OFFSET	0x2c
 #define SDHCI_ARASAN_VENDOR_REGISTER	0x78
 
 #define VENDOR_ENHANCED_STROBE		BIT(0)
-#define CLK_CTRL_TIMEOUT_SHIFT		16
-#define CLK_CTRL_TIMEOUT_MASK		(0xf << CLK_CTRL_TIMEOUT_SHIFT)
-#define CLK_CTRL_TIMEOUT_MIN_EXP	13
 
 #define PHY_CLK_TOO_SLOW_HZ		400000
 
@@ -163,15 +159,15 @@
 
 static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
 {
-	u32 div;
 	unsigned long freq;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 
-	div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
-	div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;
+	/* SDHCI timeout clock is in kHz */
+	freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000);
 
-	freq = clk_get_rate(pltfm_host->clk);
-	freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div);
+	/* or in MHz */
+	if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
+		freq = DIV_ROUND_UP(freq, 1000);
 
 	return freq;
 }
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index a9b7fc0..387ae1c 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -85,11 +85,30 @@
 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
 }
 
+/*
+ * In this specific implementation of the SDHCI controller, the power register
+ * needs to have a valid voltage set even when the power supply is managed by
+ * an external regulator.
+ */
+static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
+		     unsigned short vdd)
+{
+	if (!IS_ERR(host->mmc->supply.vmmc)) {
+		struct mmc_host *mmc = host->mmc;
+
+		spin_unlock_irq(&host->lock);
+		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+		spin_lock_irq(&host->lock);
+	}
+	sdhci_set_power_noreg(host, mode, vdd);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 	.set_clock		= sdhci_at91_set_clock,
 	.set_bus_width		= sdhci_set_bus_width,
 	.reset			= sdhci_reset,
 	.set_uhs_signaling	= sdhci_set_uhs_signaling,
+	.set_power		= sdhci_at91_set_power,
 };
 
 static const struct sdhci_pltfm_data soc_data_sama5d2 = {
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1d9e00a..b0b9ceb 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -412,6 +412,8 @@
 	if (mode == MMC_POWER_OFF)
 		return;
 
+	spin_unlock_irq(&host->lock);
+
 	/*
 	 * Bus power might not enable after D3 -> D0 transition due to the
 	 * present state not yet having propagated. Retry for up to 2ms.
@@ -424,6 +426,8 @@
 		reg |= SDHCI_POWER_ON;
 		sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
 	}
+
+	spin_lock_irq(&host->lock);
 }
 
 static const struct sdhci_ops sdhci_intel_byt_ops = {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ba637ff..a983ba0 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1371,7 +1371,9 @@
 			return;
 		}
 		timeout--;
-		mdelay(1);
+		spin_unlock_irq(&host->lock);
+		usleep_range(900, 1100);
+		spin_lock_irq(&host->lock);
 	}
 
 	clk |= SDHCI_CLOCK_CARD_EN;
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f..1d84335 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@
 	struct ushc_data *ushc;
 	int ret;
 
+	if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
 	if (mmc == NULL)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index bbef959..1592e1c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -917,8 +917,8 @@
 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX	1
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH	1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX	2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX		2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH		1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX	3
 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX	4
@@ -927,6 +927,8 @@
 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH	1
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX	6
 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH	1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX	7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH	1
 
 #define RX_NORMAL_DESC0_OVT_INDEX		0
 #define RX_NORMAL_DESC0_OVT_WIDTH		16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1babcc1..ca106d4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1721,10 +1721,15 @@
 
 	/* Get the header length */
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       FIRST, 1);
 		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
 						      RX_NORMAL_DESC2, HL);
 		if (rdata->rx.hdr_len)
 			pdata->ext_stats.rx_split_header_packets++;
+	} else {
+		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+			       FIRST, 0);
 	}
 
 	/* Get the RSS hash */
@@ -1747,19 +1752,16 @@
 		}
 	}
 
-	/* Get the packet length */
-	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
-	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
-		/* Not all the data has been transferred for this packet */
-		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-			       INCOMPLETE, 1);
+	/* Not all the data has been transferred for this packet */
+	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
 		return 0;
-	}
 
 	/* This is the last of the data for this packet */
 	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
-		       INCOMPLETE, 0);
+		       LAST, 1);
+
+	/* Get the packet length */
+	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
 	/* Set checksum done indicator as appropriate */
 	if (netdev->features & NETIF_F_RXCSUM)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 7f9216d..0f0f3014 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1752,13 +1752,12 @@
 {
 	struct sk_buff *skb;
 	u8 *packet;
-	unsigned int copy_len;
 
 	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
 	if (!skb)
 		return NULL;
 
-	/* Start with the header buffer which may contain just the header
+	/* Pull in the header buffer which may contain just the header
 	 * or the header plus data
 	 */
 	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1767,30 +1766,49 @@
 
 	packet = page_address(rdata->rx.hdr.pa.pages) +
 		 rdata->rx.hdr.pa.pages_offset;
-	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
-	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
-	skb_copy_to_linear_data(skb, packet, copy_len);
-	skb_put(skb, copy_len);
-
-	len -= copy_len;
-	if (len) {
-		/* Add the remaining data as a frag */
-		dma_sync_single_range_for_cpu(pdata->dev,
-					      rdata->rx.buf.dma_base,
-					      rdata->rx.buf.dma_off,
-					      rdata->rx.buf.dma_len,
-					      DMA_FROM_DEVICE);
-
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				rdata->rx.buf.pa.pages,
-				rdata->rx.buf.pa.pages_offset,
-				len, rdata->rx.buf.dma_len);
-		rdata->rx.buf.pa.pages = NULL;
-	}
+	skb_copy_to_linear_data(skb, packet, len);
+	skb_put(skb, len);
 
 	return skb;
 }
 
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+				     struct xgbe_packet_data *packet)
+{
+	/* Always zero if not the first descriptor */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+		return 0;
+
+	/* First descriptor with split header, return header length */
+	if (rdata->rx.hdr_len)
+		return rdata->rx.hdr_len;
+
+	/* First descriptor but not the last descriptor and no split header,
+	 * so the full buffer was used
+	 */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+		return rdata->rx.hdr.dma_len;
+
+	/* First descriptor and last descriptor and no split header, so
+	 * calculate how much of the buffer was used
+	 */
+	return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+				     struct xgbe_packet_data *packet,
+				     unsigned int len)
+{
+	/* Always the full buffer if not the last descriptor */
+	if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+		return rdata->rx.buf.dma_len;
+
+	/* Last descriptor so calculate how much of the buffer was used
+	 * for the last bit of data
+	 */
+	return rdata->rx.len - len;
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
 	struct xgbe_prv_data *pdata = channel->pdata;
@@ -1873,8 +1891,8 @@
 	struct napi_struct *napi;
 	struct sk_buff *skb;
 	struct skb_shared_hwtstamps *hwtstamps;
-	unsigned int incomplete, error, context_next, context;
-	unsigned int len, rdesc_len, max_len;
+	unsigned int last, error, context_next, context;
+	unsigned int len, buf1_len, buf2_len, max_len;
 	unsigned int received = 0;
 	int packet_count = 0;
 
@@ -1884,7 +1902,7 @@
 	if (!ring)
 		return 0;
 
-	incomplete = 0;
+	last = 0;
 	context_next = 0;
 
 	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -1918,9 +1936,8 @@
 		received++;
 		ring->cur++;
 
-		incomplete = XGMAC_GET_BITS(packet->attributes,
-					    RX_PACKET_ATTRIBUTES,
-					    INCOMPLETE);
+		last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+				      LAST);
 		context_next = XGMAC_GET_BITS(packet->attributes,
 					      RX_PACKET_ATTRIBUTES,
 					      CONTEXT_NEXT);
@@ -1929,7 +1946,7 @@
 					 CONTEXT);
 
 		/* Earlier error, just drain the remaining data */
-		if ((incomplete || context_next) && error)
+		if ((!last || context_next) && error)
 			goto read_again;
 
 		if (error || packet->errors) {
@@ -1941,16 +1958,22 @@
 		}
 
 		if (!context) {
-			/* Length is cumulative, get this descriptor's length */
-			rdesc_len = rdata->rx.len - len;
-			len += rdesc_len;
+			/* Get the data length in the descriptor buffers */
+			buf1_len = xgbe_rx_buf1_len(rdata, packet);
+			len += buf1_len;
+			buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+			len += buf2_len;
 
-			if (rdesc_len && !skb) {
+			if (!skb) {
 				skb = xgbe_create_skb(pdata, napi, rdata,
-						      rdesc_len);
-				if (!skb)
+						      buf1_len);
+				if (!skb) {
 					error = 1;
-			} else if (rdesc_len) {
+					goto skip_data;
+				}
+			}
+
+			if (buf2_len) {
 				dma_sync_single_range_for_cpu(pdata->dev,
 							rdata->rx.buf.dma_base,
 							rdata->rx.buf.dma_off,
@@ -1960,13 +1983,14 @@
 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 						rdata->rx.buf.pa.pages,
 						rdata->rx.buf.pa.pages_offset,
-						rdesc_len,
+						buf2_len,
 						rdata->rx.buf.dma_len);
 				rdata->rx.buf.pa.pages = NULL;
 			}
 		}
 
-		if (incomplete || context_next)
+skip_data:
+		if (!last || context_next)
 			goto read_again;
 
 		if (!skb)
@@ -2024,7 +2048,7 @@
 	}
 
 	/* Check if we need to save state before leaving */
-	if (received && (incomplete || context_next)) {
+	if (received && (!last || context_next)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 		rdata->state_saved = 1;
 		rdata->state.skb = skb;
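
To make the new first/last length accounting concrete, consider a hypothetical 3000-byte frame received with split headers into a 256-byte header buffer and 2048-byte data buffers (all figures illustrative). On the first descriptor FD is set with hdr_len = 54, so xgbe_rx_buf1_len() returns 54 and, since LD is clear, xgbe_rx_buf2_len() returns the full 2048-byte data buffer, leaving the running len at 2102. On the second descriptor LD is set and PL reports 3000, so buf1_len is 0 (not the first descriptor) and buf2_len is 3000 - 2102 = 898, completing the frame without the old cumulative INCOMPLETE bookkeeping.
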
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a4e60e5..0975af2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3402,7 +3402,8 @@
 
 	bcmgenet_netif_stop(dev);
 
-	phy_suspend(priv->phydev);
+	if (!device_may_wakeup(d))
+		phy_suspend(priv->phydev);
 
 	netif_device_detach(dev);
 
@@ -3499,7 +3500,8 @@
 
 	netif_device_attach(dev);
 
-	phy_resume(priv->phydev);
+	if (!device_may_wakeup(d))
+		phy_resume(priv->phydev);
 
 	if (priv->eee.eee_enabled)
 		bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e876076..2f92819 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -220,20 +220,6 @@
 	udelay(60);
 }
 
-static void bcmgenet_internal_phy_setup(struct net_device *dev)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	u32 reg;
-
-	/* Power up PHY */
-	bcmgenet_phy_power_set(dev, true);
-	/* enable APD */
-	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
-	reg |= EXT_PWR_DN_EN_LD;
-	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-	bcmgenet_mii_reset(dev);
-}
-
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 {
 	u32 reg;
@@ -281,7 +267,6 @@
 
 		if (priv->internal_phy) {
 			phy_name = "internal PHY";
-			bcmgenet_internal_phy_setup(dev);
 		} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 			phy_name = "MoCA";
 			bcmgenet_moca_phy_setup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index bfe410e..3f51a44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -367,6 +367,8 @@
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_SET_RATE_LIMIT:
+	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_ALLOC_PD:
 	case MLX5_CMD_OP_ALLOC_UAR:
 	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -500,6 +502,8 @@
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(ALLOC_PD);
 	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
 	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 796bdf0..7309ae3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -602,6 +602,10 @@
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+		/* Subtract one since we already counted this as one
+		 * "regular" packet in mlx5e_complete_rx_cqe()
+		 */
+		rq->stats.packets += lro_num_seg - 1;
 		rq->stats.lro_packets++;
 		rq->stats.lro_bytes += cqe_bcnt;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index a543ea6..3fd471a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -427,14 +427,16 @@
 		}
 
 		if (is_tcf_vlan(a)) {
-			if (tcf_vlan_action(a) == VLAN_F_POP) {
+			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
 				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
 					return -EOPNOTSUPP;
 
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
 				attr->vlan = tcf_vlan_push_vid(a);
+			} else { /* action is TCA_VLAN_ACT_MODIFY */
+				return -EOPNOTSUPP;
 			}
 			continue;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cfb6837..5743110 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -272,15 +272,18 @@
 			sq->stats.tso_bytes += skb->len - ihs;
 		}
 
+		sq->stats.packets += skb_shinfo(skb)->gso_segs;
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
 	} else {
 		bf = sq->bf_budget &&
 		     !skb->xmit_more &&
 		     !skb_shinfo(skb)->nr_frags;
 		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		sq->stats.packets++;
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
 	}
 
+	sq->stats.bytes += num_bytes;
 	wi->num_bytes = num_bytes;
 
 	if (skb_vlan_tag_present(skb)) {
@@ -377,8 +380,6 @@
 	if (bf)
 		sq->bf_budget--;
 
-	sq->stats.packets++;
-	sq->stats.bytes += num_bytes;
 	return NETDEV_TX_OK;
 
 dma_unmap_wqe_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0c9ef87..7a196a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -87,7 +87,7 @@
 	[2] = {
 		.mask		= MLX5_PROF_MASK_QP_SIZE |
 				  MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp	= 17,
+		.log_max_qp	= 18,
 		.mr_cache[0]	= {
 			.size	= 500,
 			.limit	= 250
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 24d5272..0d519a9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -924,6 +924,8 @@
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
 	{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},	/* SIMCom 7230E */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bc744ac..a2afb8e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -467,8 +467,10 @@
 	}
 
 	if (rt6_local) {
-		if (rt6_local->rt6i_idev)
+		if (rt6_local->rt6i_idev) {
 			in6_dev_put(rt6_local->rt6i_idev);
+			rt6_local->rt6i_idev = NULL;
+		}
 
 		dst = &rt6_local->dst;
 		dev_put(dst->dev);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 3c3c4f1..7a310c4 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -2700,6 +2700,21 @@
 	schedule_work(&pcie_work);
 }
 
+static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter)
+{
+	struct pcie_service_card *card = adapter->card;
+	const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+	if (reg->sleep_cookie)
+		mwifiex_pcie_delete_sleep_cookie_buf(adapter);
+
+	mwifiex_pcie_delete_cmdrsp_buf(adapter);
+	mwifiex_pcie_delete_evtbd_ring(adapter);
+	mwifiex_pcie_delete_rxbd_ring(adapter);
+	mwifiex_pcie_delete_txbd_ring(adapter);
+	card->cmdrsp_buf = NULL;
+}
+
 /*
  * This function initializes the PCI-E host memory space, WCB rings, etc.
  *
@@ -2812,13 +2827,6 @@
 
 /*
  * This function cleans up the allocated card buffers.
- *
- * The following are freed by this function -
- *      - TXBD ring buffers
- *      - RXBD ring buffers
- *      - Event BD ring buffers
- *      - Command response ring buffer
- *      - Sleep cookie buffer
  */
 static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
 {
@@ -2834,6 +2842,8 @@
 				    "Failed to write driver not-ready signature\n");
 	}
 
+	mwifiex_pcie_free_buffers(adapter);
+
 	if (pdev) {
 		pci_iounmap(pdev, card->pci_mmap);
 		pci_iounmap(pdev, card->pci_mmap1);
@@ -3080,10 +3090,7 @@
 	pci_iounmap(pdev, card->pci_mmap1);
 }
 
-/* This function cleans up the PCI-E host memory space.
- * Some code is extracted from mwifiex_unregister_dev()
- *
- */
+/* This function cleans up the PCI-E host memory space. */
 static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 {
 	struct pcie_service_card *card = adapter->card;
@@ -3095,16 +3102,8 @@
 	adapter->seq_num = 0;
 	adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
 
-	if (card) {
-		if (reg->sleep_cookie)
-			mwifiex_pcie_delete_sleep_cookie_buf(adapter);
-
-		mwifiex_pcie_delete_cmdrsp_buf(adapter);
-		mwifiex_pcie_delete_evtbd_ring(adapter);
-		mwifiex_pcie_delete_rxbd_ring(adapter);
-		mwifiex_pcie_delete_txbd_ring(adapter);
-		card->cmdrsp_buf = NULL;
-	}
+	if (card)
+		mwifiex_pcie_free_buffers(adapter);
 
 	return;
 }
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 7810bad..52a297d 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -118,4 +118,9 @@
 config OF_NUMA
 	bool
 
+config OF_BATTERYDATA
+	def_bool y
+	help
+	  OpenFirmware (device tree) battery data accessors, used to select
+	  a battery profile by ID resistance and parse its lookup tables.
+
 endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 4b8dabe..b2f474a 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -15,5 +15,6 @@
 obj-$(CONFIG_OF_OVERLAY) += overlay.o
 obj-$(CONFIG_OF_SLIMBUS)        += of_slimbus.o
 obj-$(CONFIG_OF_NUMA) += of_numa.o
+obj-$(CONFIG_OF_BATTERYDATA) += of_batterydata.o
 
 obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
new file mode 100644
index 0000000..43417b2
--- /dev/null
+++ b/drivers/of/of_batterydata.c
@@ -0,0 +1,457 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/batterydata-lib.h>
+#include <linux/power_supply.h>
+
+static int of_batterydata_read_lut(const struct device_node *np,
+			int max_cols, int max_rows, int *ncols, int *nrows,
+			int *col_legend_data, int *row_legend_data,
+			int *lut_data)
+{
+	struct property *prop;
+	const __be32 *data;
+	int cols, rows, size, i, j, *out_values;
+
+	prop = of_find_property(np, "qcom,lut-col-legend", NULL);
+	if (!prop) {
+		pr_err("%s: No col legend found\n", np->name);
+		return -EINVAL;
+	} else if (!prop->value) {
+		pr_err("%s: No col legend value found\n", np->name);
+		return -ENODATA;
+	} else if (prop->length > max_cols * sizeof(int)) {
+		pr_err("%s: Too many columns\n", np->name);
+		return -EINVAL;
+	}
+
+	cols = prop->length/sizeof(int);
+	*ncols = cols;
+	data = prop->value;
+	for (i = 0; i < cols; i++)
+		*col_legend_data++ = be32_to_cpup(data++);
+
+	rows = 0;
+
+	prop = of_find_property(np, "qcom,lut-row-legend", NULL);
+	if (!prop || row_legend_data == NULL) {
+		/* single row lut */
+		rows = 1;
+	} else if (!prop->value) {
+		pr_err("%s: No row legend value found\n", np->name);
+		return -ENODATA;
+	} else if (prop->length > max_rows * sizeof(int)) {
+		pr_err("%s: Too many rows\n", np->name);
+		return -EINVAL;
+	}
+
+	if (rows != 1) {
+		rows = prop->length/sizeof(int);
+		*nrows = rows;
+		data = prop->value;
+		for (i = 0; i < rows; i++)
+			*row_legend_data++ = be32_to_cpup(data++);
+	}
+
+	prop = of_find_property(np, "qcom,lut-data", NULL);
+	if (!prop) {
+		pr_err("prop 'qcom,lut-data' not found\n");
+		return -EINVAL;
+	}
+	data = prop->value;
+	size = prop->length/sizeof(int);
+	if (size != cols * rows) {
+		pr_err("%s: data size mismatch, %dx%d != %d\n",
+				np->name, cols, rows, size);
+		return -EINVAL;
+	}
+	for (i = 0; i < rows; i++) {
+		out_values = lut_data + (max_cols * i);
+		for (j = 0; j < cols; j++) {
+			*out_values++ = be32_to_cpup(data++);
+			pr_debug("Value = %d\n", *(out_values-1));
+		}
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_sf_lut(struct device_node *data_node,
+				const char *name, struct sf_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+
+	rc = of_batterydata_read_lut(node, PC_CC_COLS, PC_CC_ROWS,
+			&lut->cols, &lut->rows, lut->row_entries,
+			lut->percent, *lut->sf);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_pc_temp_ocv_lut(struct device_node *data_node,
+				const char *name, struct pc_temp_ocv_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+	rc = of_batterydata_read_lut(node, PC_TEMP_COLS, PC_TEMP_ROWS,
+			&lut->cols, &lut->rows, lut->temp, lut->percent,
+			*lut->ocv);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_ibat_temp_acc_lut(struct device_node *data_node,
+			const char *name, struct ibat_temp_acc_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_debug("Couldn't find %s node.\n", name);
+		return 0;
+	}
+	rc = of_batterydata_read_lut(node, ACC_TEMP_COLS, ACC_IBAT_ROWS,
+			&lut->cols, &lut->rows, lut->temp, lut->ibat,
+			*lut->acc);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_single_row_lut(struct device_node *data_node,
+				const char *name, struct single_row_lut *lut)
+{
+	struct device_node *node = of_find_node_by_name(data_node, name);
+	int rc;
+
+	if (!lut) {
+		pr_debug("No lut provided, skipping\n");
+		return 0;
+	} else if (!node) {
+		pr_err("Couldn't find %s node.\n", name);
+		return -EINVAL;
+	}
+
+	rc = of_batterydata_read_lut(node, MAX_SINGLE_LUT_COLS, 1,
+			&lut->cols, NULL, lut->x, NULL, lut->y);
+	if (rc) {
+		pr_err("Failed to read %s node.\n", name);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int of_batterydata_read_batt_id_kohm(const struct device_node *np,
+				const char *propname, struct batt_ids *batt_ids)
+{
+	struct property *prop;
+	const __be32 *data;
+	int num, i, *id_kohm = batt_ids->kohm;
+
+	prop = of_find_property(np, "qcom,batt-id-kohm", NULL);
+	if (!prop) {
+		pr_err("%s: No battery id resistor found\n", np->name);
+		return -EINVAL;
+	} else if (!prop->value) {
+		pr_err("%s: No battery id resistor value found\n",
+						np->name);
+		return -ENODATA;
+	} else if (prop->length > MAX_BATT_ID_NUM * sizeof(__be32)) {
+		pr_err("%s: Too many battery id resistors\n", np->name);
+		return -EINVAL;
+	}
+
+	num = prop->length/sizeof(__be32);
+	batt_ids->num = num;
+	data = prop->value;
+	for (i = 0; i < num; i++)
+		*id_kohm++ = be32_to_cpup(data++);
+
+	return 0;
+}
+
+#define OF_PROP_READ(property, qpnp_dt_property, node, rc, optional)	\
+do {									\
+	if (rc)								\
+		break;							\
+	rc = of_property_read_u32(node, "qcom," qpnp_dt_property,	\
+					&property);			\
+									\
+	if ((rc == -EINVAL) && optional) {				\
+		property = -EINVAL;					\
+		rc = 0;							\
+	} else if (rc) {						\
+		pr_err("Error reading " #qpnp_dt_property		\
+				" property rc = %d\n", rc);		\
+	}								\
+} while (0)
+
+static int of_batterydata_load_battery_data(struct device_node *node,
+				int best_id_kohm,
+				struct bms_battery_data *batt_data)
+{
+	int rc;
+
+	rc = of_batterydata_read_single_row_lut(node, "qcom,fcc-temp-lut",
+			batt_data->fcc_temp_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_pc_temp_ocv_lut(node,
+			"qcom,pc-temp-ocv-lut",
+			batt_data->pc_temp_ocv_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_sf_lut(node, "qcom,rbatt-sf-lut",
+			batt_data->rbatt_sf_lut);
+	if (rc)
+		return rc;
+
+	rc = of_batterydata_read_ibat_temp_acc_lut(node, "qcom,ibat-acc-lut",
+						batt_data->ibat_acc_lut);
+	if (rc)
+		return rc;
+
+	rc = of_property_read_string(node, "qcom,battery-type",
+					&batt_data->battery_type);
+	if (rc) {
+		pr_err("Error reading qcom,battery-type property rc=%d\n", rc);
+		batt_data->battery_type = NULL;
+		return rc;
+	}
+
+	OF_PROP_READ(batt_data->fcc, "fcc-mah", node, rc, false);
+	OF_PROP_READ(batt_data->default_rbatt_mohm,
+			"default-rbatt-mohm", node, rc, false);
+	OF_PROP_READ(batt_data->rbatt_capacitive_mohm,
+			"rbatt-capacitive-mohm", node, rc, false);
+	OF_PROP_READ(batt_data->flat_ocv_threshold_uv,
+			"flat-ocv-threshold-uv", node, rc, true);
+	OF_PROP_READ(batt_data->max_voltage_uv,
+			"max-voltage-uv", node, rc, true);
+	OF_PROP_READ(batt_data->cutoff_uv, "v-cutoff-uv", node, rc, true);
+	OF_PROP_READ(batt_data->iterm_ua, "chg-term-ua", node, rc, true);
+	OF_PROP_READ(batt_data->fastchg_current_ma,
+			"fastchg-current-ma", node, rc, true);
+	OF_PROP_READ(batt_data->fg_cc_cv_threshold_mv,
+			"fg-cc-cv-threshold-mv", node, rc, true);
+
+	batt_data->batt_id_kohm = best_id_kohm;
+
+	return rc;
+}
+
+static int64_t of_batterydata_convert_battery_id_kohm(int batt_id_uv,
+				int rpull_up, int vadc_vdd)
+{
+	int64_t resistor_value_kohm, denom;
+
+	if (batt_id_uv == 0) {
+		/* vadc not correct or batt id line grounded, report 0 kohms */
+		return 0;
+	}
+	/* calculate the battery id resistance reported via ADC */
+	denom = div64_s64(vadc_vdd * 1000000LL, batt_id_uv) - 1000000LL;
+
+	if (denom == 0) {
+		/* batt id connector might be open, return 0 kohms */
+		return 0;
+	}
+	resistor_value_kohm = div64_s64(rpull_up * 1000000LL + denom/2, denom);
+
+	pr_debug("batt id voltage = %d, resistor value = %lld\n",
+			batt_id_uv, resistor_value_kohm);
+
+	return resistor_value_kohm;
+}
+
+struct device_node *of_batterydata_get_best_profile(
+		const struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type)
+{
+	struct batt_ids batt_ids;
+	struct device_node *node, *best_node = NULL;
+	const char *battery_type = NULL;
+	int delta = 0, best_delta = 0, best_id_kohm = 0, id_range_pct,
+		i = 0, rc = 0, limit = 0;
+	bool in_range = false;
+
+	/* read battery id range percentage for best profile */
+	rc = of_property_read_u32(batterydata_container_node,
+			"qcom,batt-id-range-pct", &id_range_pct);
+
+	if (rc) {
+		if (rc == -EINVAL) {
+			id_range_pct = 0;
+		} else {
+			pr_err("failed to read battery id range\n");
+			return ERR_PTR(-ENXIO);
+		}
+	}
+
+	/*
+	 * Find the battery data with a battery id resistor closest to this one
+	 */
+	for_each_child_of_node(batterydata_container_node, node) {
+		if (batt_type != NULL) {
+			rc = of_property_read_string(node, "qcom,battery-type",
+							&battery_type);
+			if (!rc && strcmp(battery_type, batt_type) == 0) {
+				best_node = node;
+				best_id_kohm = batt_id_kohm;
+				break;
+			}
+		} else {
+			rc = of_batterydata_read_batt_id_kohm(node,
+							"qcom,batt-id-kohm",
+							&batt_ids);
+			if (rc)
+				continue;
+			for (i = 0; i < batt_ids.num; i++) {
+				delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+				limit = (batt_ids.kohm[i] * id_range_pct) / 100;
+				in_range = (delta <= limit);
+				/*
+				 * Check if the delta is the lowest one
+				 * and also if the limits are in range
+				 * before selecting the best node.
+				 */
+				if ((delta < best_delta || !best_node)
+					&& in_range) {
+					best_node = node;
+					best_delta = delta;
+					best_id_kohm = batt_ids.kohm[i];
+				}
+			}
+		}
+	}
+
+	if (best_node == NULL) {
+		pr_err("No battery data found\n");
+		return best_node;
+	}
+
+	/* check that profile id is in range of the measured batt_id */
+	if (abs(best_id_kohm - batt_id_kohm) >
+			((best_id_kohm * id_range_pct) / 100)) {
+		pr_err("out of range: profile id %d batt id %d pct %d\n",
+			best_id_kohm, batt_id_kohm, id_range_pct);
+		return NULL;
+	}
+
+	rc = of_property_read_string(best_node, "qcom,battery-type",
+							&battery_type);
+	if (!rc)
+		pr_info("%s found\n", battery_type);
+	else
+		pr_info("%s found\n", best_node->name);
+
+	return best_node;
+}
+
+int of_batterydata_read_data(struct device_node *batterydata_container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv)
+{
+	struct device_node *node, *best_node;
+	struct batt_ids batt_ids;
+	const char *battery_type = NULL;
+	int delta, best_delta, batt_id_kohm, rpull_up_kohm,
+		vadc_vdd_uv, best_id_kohm, i, rc = 0;
+
+	node = batterydata_container_node;
+	OF_PROP_READ(rpull_up_kohm, "rpull-up-kohm", node, rc, false);
+	OF_PROP_READ(vadc_vdd_uv, "vref-batt-therm", node, rc, false);
+	if (rc)
+		return rc;
+
+	batt_id_kohm = of_batterydata_convert_battery_id_kohm(batt_id_uv,
+					rpull_up_kohm, vadc_vdd_uv);
+	best_node = NULL;
+	best_delta = 0;
+	best_id_kohm = 0;
+
+	/*
+	 * Find the battery data with a battery id resistor closest to this one
+	 */
+	for_each_child_of_node(batterydata_container_node, node) {
+		rc = of_batterydata_read_batt_id_kohm(node,
+						"qcom,batt-id-kohm",
+						&batt_ids);
+		if (rc)
+			continue;
+		for (i = 0; i < batt_ids.num; i++) {
+			delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+			if (delta < best_delta || !best_node) {
+				best_node = node;
+				best_delta = delta;
+				best_id_kohm = batt_ids.kohm[i];
+			}
+		}
+	}
+
+	if (best_node == NULL) {
+		pr_err("No battery data found\n");
+		return -ENODATA;
+	}
+	rc = of_property_read_string(best_node, "qcom,battery-type",
+							&battery_type);
+	if (!rc)
+		pr_info("%s loaded\n", battery_type);
+	else
+		pr_info("%s loaded\n", best_node->name);
+
+	return of_batterydata_load_battery_data(best_node,
+					best_id_kohm, batt_data);
+}
+
+MODULE_LICENSE("GPL v2");
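
The fixed-point math in of_batterydata_convert_battery_id_kohm() is the pull-up divider solved for the battery-side resistor, R_id = R_pull / (V_vdd / V_id - 1), scaled by 10^6 with rounding. A standalone, purely illustrative check with made-up numbers (100 kohm pull-up, 1.8 V VADC reference):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: same formula as the kernel helper, in plain
 * 64-bit arithmetic (units: uV for voltages, kohm for the pull-up). */
static int64_t batt_id_kohm(int64_t batt_id_uv, int64_t rpull_up_kohm,
			    int64_t vadc_vdd_uv)
{
	int64_t denom;

	if (batt_id_uv == 0)
		return 0;
	denom = vadc_vdd_uv * 1000000 / batt_id_uv - 1000000;
	if (denom == 0)
		return 0;
	return (rpull_up_kohm * 1000000 + denom / 2) / denom;
}

int main(void)
{
	/* 0.9 V reading: V_id = V_vdd / 2, so R_id equals the pull-up */
	printf("%lld kohm\n", (long long)batt_id_kohm(900000, 100, 1800000));
	/* 0.6 V reading: V_id = V_vdd / 3, so R_id is half the pull-up */
	printf("%lld kohm\n", (long long)batt_id_kohm(600000, 100, 1800000));
	return 0;
}

The two prints come out as 100 kohm and 50 kohm respectively, matching the closed-form divider.
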
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 3308427..4399de34 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -939,8 +939,10 @@
 	 * pardevice fields. -arca
 	 */
 	port->ops->init_state(par_dev, par_dev->state);
-	port->proc_device = par_dev;
-	parport_device_proc_register(par_dev);
+	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+		port->proc_device = par_dev;
+		parport_device_proc_register(par_dev);
+	}
 
 	return par_dev;
 
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index b37b572..ad3e1e7 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -27,6 +27,9 @@
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
 
+#define USE_CPUHP_STATE CPUHP_AP_PERF_ARM_STARTING
+#define USE_CPUHP_STR "AP_PERF_ARM_STARTING"
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -366,6 +369,8 @@
 		return err;
 	}
 
+	armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
 	return 0;
 }
 
@@ -568,6 +573,7 @@
 		.read		= armpmu_read,
 		.filter_match	= armpmu_filter_match,
 		.attr_groups	= armpmu->attr_groups,
+		.events_across_hotplug = 1,
 	};
 	armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
 		&armpmu_common_attr_group;
@@ -620,6 +626,8 @@
 	struct platform_device *pmu_device = cpu_pmu->plat_device;
 	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 
+	cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
 	irq = platform_get_irq(pmu_device, 0);
@@ -627,6 +635,7 @@
 		on_each_cpu_mask(&cpu_pmu->supported_cpus,
 				 cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
+		cpu_pmu->percpu_irq = -1;
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			int cpu = i;
@@ -641,6 +650,7 @@
 				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
+	cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
 }
 
 static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -670,6 +680,7 @@
 
 		on_each_cpu_mask(&cpu_pmu->supported_cpus,
 				 cpu_pmu_enable_percpu_irq, &irq, 1);
+		cpu_pmu->percpu_irq = irq;
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			int cpu = i;
@@ -709,22 +720,12 @@
 	return 0;
 }
 
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
-{
-	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return 0;
-	if (pmu->reset)
-		pmu->reset(pmu);
-	return 0;
-}
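+/*
+ * Bundles the CPU PM notifier arguments so cpu_pm_pmu_common() can be shared
+ * between the PM notifier and the hotplug starting/stopping callbacks.
+ */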
+struct cpu_pm_pmu_args {
+	struct arm_pmu	*armpmu;
+	unsigned long	cmd;
+	int		cpu;
+	int		ret;
+};
 
 #ifdef CONFIG_CPU_PM
 static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
@@ -772,15 +773,19 @@
 	}
 }
 
-static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
-			     void *v)
+static void cpu_pm_pmu_common(void *info)
 {
-	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+	struct cpu_pm_pmu_args *data	= info;
+	struct arm_pmu *armpmu		= data->armpmu;
+	unsigned long cmd		= data->cmd;
+	int cpu				= data->cpu;
 	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return NOTIFY_DONE;
+	if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
+		data->ret = NOTIFY_DONE;
+		return;
+	}
 
 	/*
 	 * Always reset the PMU registers on power-up even if
@@ -789,8 +794,12 @@
 	if (cmd == CPU_PM_EXIT && armpmu->reset)
 		armpmu->reset(armpmu);
 
-	if (!enabled)
-		return NOTIFY_OK;
+	if (!enabled) {
+		data->ret = NOTIFY_OK;
+		return;
+	}
+
+	data->ret = NOTIFY_OK;
 
 	switch (cmd) {
 	case CPU_PM_ENTER:
@@ -798,15 +807,29 @@
 		cpu_pm_pmu_setup(armpmu, cmd);
 		break;
 	case CPU_PM_EXIT:
-		cpu_pm_pmu_setup(armpmu, cmd);
 	case CPU_PM_ENTER_FAILED:
+		cpu_pm_pmu_setup(armpmu, cmd);
 		armpmu->start(armpmu);
 		break;
 	default:
-		return NOTIFY_DONE;
+		data->ret = NOTIFY_DONE;
+		break;
 	}
 
-	return NOTIFY_OK;
+	return;
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+			     void *v)
+{
+	struct cpu_pm_pmu_args data = {
+		.armpmu	= container_of(b, struct arm_pmu, cpu_pm_nb),
+		.cmd	= cmd,
+		.cpu	= smp_processor_id(),
+	};
+
+	cpu_pm_pmu_common(&data);
+	return data.ret;
 }
 
 static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
@@ -819,11 +842,75 @@
 {
 	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
 }
+
 #else
 static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
 static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+static void cpu_pm_pmu_common(void *info) { }
 #endif
 
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+
+	struct cpu_pm_pmu_args data = {
+		.armpmu	= pmu,
+		.cpu	= (int)cpu,
+	};
+
+	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return 0;
+
+	data.cmd    = CPU_PM_EXIT;
+	cpu_pm_pmu_common(&data);
+	if (data.ret == NOTIFY_DONE)
+		return 0;
+
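+	/* re-enable the per-CPU PMU IRQ if the PMU is not fully off */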
+	if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
+		data.armpmu->plat_device) {
+		int irq = data.armpmu->percpu_irq;
+
+		if (irq > 0 && irq_is_percpu(irq))
+			cpu_pmu_enable_percpu_irq(&irq);
+
+	}
+
+	return 0;
+}
+
+static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+
+	struct cpu_pm_pmu_args data = {
+		.armpmu	= pmu,
+		.cpu	= (int)cpu,
+	};
+
+	if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return 0;
+
+	data.cmd = CPU_PM_ENTER;
+	cpu_pm_pmu_common(&data);
+	/* Disarm the PMU IRQ before disappearing. */
+	if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
+		data.armpmu->plat_device) {
+		int irq = data.armpmu->percpu_irq;
+
+		if (irq > 0 && irq_is_percpu(irq))
+			cpu_pmu_disable_percpu_irq(&irq);
+
+	}
+
+	return 0;
+}
+
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int err;
@@ -834,14 +921,14 @@
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+	err = cpuhp_state_add_instance_nocalls(USE_CPUHP_STATE,
 					       &cpu_pmu->node);
 	if (err)
 		goto out_free;
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
-		goto out_unregister;
+		goto out_unreg_perf_starting;
 
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
@@ -872,8 +959,8 @@
 
 	return 0;
 
-out_unregister:
-	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+out_unreg_perf_starting:
+	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
 					    &cpu_pmu->node);
 out_free:
 	free_percpu(cpu_hw_events);
@@ -883,7 +970,7 @@
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+	cpuhp_state_remove_instance_nocalls(USE_CPUHP_STATE,
 					    &cpu_pmu->node);
 	free_percpu(cpu_pmu->hw_events);
 }
@@ -1064,6 +1151,9 @@
 	if (!__oprofile_cpu_pmu)
 		__oprofile_cpu_pmu = pmu;
 
+	pmu->pmu_state  = ARM_PMU_STATE_OFF;
+	pmu->percpu_irq = -1;
+
 	pr_info("enabled with %s PMU driver, %d counters available\n",
 			pmu->name, pmu->num_events);
 
@@ -1083,11 +1173,12 @@
 {
 	int ret;
 
-	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
-				      "AP_PERF_ARM_STARTING",
-				      arm_perf_starting_cpu, NULL);
+	ret = cpuhp_setup_state_multi(USE_CPUHP_STATE,
+					USE_CPUHP_STR,
+					arm_perf_starting_cpu,
+					arm_perf_stopping_cpu);
 	if (ret)
-		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+		pr_err("CPU hotplug ARM PMU STOPPING registering failed: %d\n",
 		       ret);
 	return ret;
 }
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f9d483b..2a1367e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -597,10 +597,6 @@
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + g->intr_status_reg);
-	val &= ~BIT(g->intr_status_bit);
-	writel(val, pctrl->regs + g->intr_status_reg);
-
 	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val |= BIT(g->intr_enable_bit);
 	writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 77e9dd7..f06fb1f 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -138,6 +138,7 @@
  * struct pmic_gpio_pad - keep current GPIO settings
  * @base: Address base in SPMI device.
  * @irq: IRQ number which this GPIO generate.
+ * @gpio_idx: The index in GPIO's hardware number space (1-based)
  * @is_enabled: Set to false when GPIO should be put in high Z state.
  * @out_value: Cached pin output value
  * @have_buffer: Set to true if GPIO output could be configured in push-pull,
@@ -158,6 +159,7 @@
 struct pmic_gpio_pad {
 	u16		base;
 	int		irq;
+	int		gpio_idx;
 	bool		is_enabled;
 	bool		out_value;
 	bool		have_buffer;
@@ -179,6 +181,7 @@
 	struct regmap	*map;
 	struct pinctrl_dev *ctrl;
 	struct gpio_chip chip;
+	const char **gpio_groups;
 };
 
 static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -297,7 +300,9 @@
 					 const char *const **groups,
 					 unsigned *const num_qgroups)
 {
-	*groups = pmic_gpio_groups;
+	struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = state->gpio_groups;
 	*num_qgroups = pctldev->desc->npins;
 	return 0;
 }
@@ -637,7 +642,7 @@
 
 	pad = pctldev->desc->pins[pin].drv_data;
 
-	seq_printf(s, " gpio%-2d:", pin + PMIC_GPIO_PHYSICAL_OFFSET);
+	seq_printf(s, " gpio%-2d:", pad->gpio_idx);
 
 	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_EN_CTL);
 
@@ -742,13 +747,29 @@
 			      const struct of_phandle_args *gpio_desc,
 			      u32 *flags)
 {
+	int i;
+	struct pmic_gpio_state *state = gpiochip_get_data(chip);
+	struct pinctrl_desc *desc = state->ctrl->desc;
+	struct pmic_gpio_pad *pad;
+
 	if (chip->of_gpio_n_cells < 2)
 		return -EINVAL;
 
 	if (flags)
 		*flags = gpio_desc->args[1];
 
-	return gpio_desc->args[0] - PMIC_GPIO_PHYSICAL_OFFSET;
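+	/* map the hardware GPIO number to its registered pin index */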
+	for (i = 0; i < chip->ngpio; i++) {
+		pad = desc->pins[i].drv_data;
+		if (pad->gpio_idx == gpio_desc->args[0]) {
+			dev_dbg(state->dev, "gpio%-2d xlate to pin%-2d\n",
+						gpio_desc->args[0], i);
+			return i;
+		}
+	}
+
+	dev_err(state->dev, "Couldn't find pin for gpio %d\n",
+				gpio_desc->args[0]);
+	return -ENODEV;
 }
 
 static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
@@ -934,43 +955,124 @@
 	struct pinctrl_desc *pctrldesc;
 	struct pmic_gpio_pad *pad, *pads;
 	struct pmic_gpio_state *state;
-	int ret, npins, i;
-	u32 reg;
+	int ret, npins, ngpios, i, j, pin_idx;
+	int disallowed_count = 0;
+	u32 reg[2], start, size;
+	u32 *disallowed = NULL;
 
-	ret = of_property_read_u32(dev->of_node, "reg", &reg);
+	ret = of_property_read_u32_array(dev->of_node, "reg", reg, 2);
 	if (ret < 0) {
-		dev_err(dev, "missing base address");
+		dev_err(dev, "reg property reading failed\n");
 		return ret;
 	}
+	start = reg[0];
+	size = reg[1];
 
-	npins = platform_irq_count(pdev);
-	if (!npins)
+	ngpios = size / PMIC_GPIO_ADDRESS_RANGE;
+	if (ngpios == 0) {
+		dev_err(dev, "no gpios assigned\n");
+		return -ENODEV;
+	}
+
+	if (ngpios > ARRAY_SIZE(pmic_gpio_groups)) {
+		dev_err(dev, "reg property defines %d gpios, but only %d are allowed\n",
+				ngpios, (int)ARRAY_SIZE(pmic_gpio_groups));
 		return -EINVAL;
-	if (npins < 0)
-		return npins;
+	}
 
-	BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups));
+	if (of_find_property(dev->of_node, "qcom,gpios-disallowed",
+					&disallowed_count)) {
+		disallowed_count /= sizeof(u32);
+		if (disallowed_count == 0) {
+			dev_err(dev, "No data in gpios-disallowed\n");
+			return -EINVAL;
+		}
+
+		disallowed = kcalloc(disallowed_count, sizeof(u32), GFP_KERNEL);
+		if (disallowed == NULL)
+			return -ENOMEM;
+
+		ret = of_property_read_u32_array(dev->of_node,
+				"qcom,gpios-disallowed",
+				disallowed, disallowed_count);
+		if (ret < 0) {
+			dev_err(dev, "qcom,gpios-disallowed property reading failed, ret=%d\n",
+								ret);
+			goto err_free;
+		}
+
+		for (i = 0; i < disallowed_count; i++) {
+			if (disallowed[i] >= ngpios + PMIC_GPIO_PHYSICAL_OFFSET
+				|| disallowed[i] < PMIC_GPIO_PHYSICAL_OFFSET) {
+				dev_err(dev, "invalid gpio = %d specified in qcom,gpios-disallowed, supported values: %d to %d\n",
+					disallowed[i],
+					PMIC_GPIO_PHYSICAL_OFFSET,
+					ngpios - 1 + PMIC_GPIO_PHYSICAL_OFFSET);
+				ret = -EINVAL;
+				goto err_free;
+			}
+			for (j = 0; j < i; j++) {
+				if (disallowed[i] == disallowed[j]) {
+					dev_err(dev, "duplicate gpio = %d listed in qcom,gpios-disallowed\n",
+							disallowed[i]);
+					ret = -EINVAL;
+					goto err_free;
+				}
+			}
+			dev_dbg(dev, "gpio %d NOT supported\n", disallowed[i]);
+		}
+	} else {
+		disallowed_count = 0;
+	}
+
+	npins = ngpios - disallowed_count;
+	if (npins <= 0) {
+		dev_err(dev, "No pins assigned\n");
+		ret = -ENODEV;
+		goto err_free;
+	}
+	if (platform_irq_count(pdev) != npins) {
+		dev_err(dev, "%d IRQs defined but %d expected\n",
+				platform_irq_count(pdev), npins);
+		ret = -EINVAL;
+		goto err_free;
+	}
 
 	state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
-	if (!state)
-		return -ENOMEM;
+	if (!state) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	platform_set_drvdata(pdev, state);
 
 	state->dev = &pdev->dev;
 	state->map = dev_get_regmap(dev->parent, NULL);
 
+	state->gpio_groups = devm_kcalloc(dev, npins,
+				sizeof(*state->gpio_groups), GFP_KERNEL);
+	if (!state->gpio_groups) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
+
 	pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
-	if (!pindesc)
-		return -ENOMEM;
+	if (!pindesc) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
-	if (!pads)
-		return -ENOMEM;
+	if (!pads) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
-	if (!pctrldesc)
-		return -ENOMEM;
+	if (!pctrldesc) {
+		ret = -ENOMEM;
+		goto err_free;
+	}
 
 	pctrldesc->pctlops = &pmic_gpio_pinctrl_ops;
 	pctrldesc->pmxops = &pmic_gpio_pinmux_ops;
@@ -984,22 +1086,42 @@
 #ifdef CONFIG_DEBUG_FS
 	pctrldesc->custom_conf_items = pmic_conf_items;
 #endif
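+	/* register only GPIOs not listed in qcom,gpios-disallowed */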
+	for (pin_idx = 0, i = 0; i < ngpios; i++) {
+		for (j = 0; j < disallowed_count; j++) {
+			if (i + PMIC_GPIO_PHYSICAL_OFFSET == disallowed[j])
+				break;
+		}
+		if (j != disallowed_count)
+			continue;
 
-	for (i = 0; i < npins; i++, pindesc++) {
-		pad = &pads[i];
+		pad = &pads[pin_idx];
 		pindesc->drv_data = pad;
-		pindesc->number = i;
+		pindesc->number = pin_idx;
 		pindesc->name = pmic_gpio_groups[i];
 
-		pad->irq = platform_get_irq(pdev, i);
-		if (pad->irq < 0)
-			return pad->irq;
+		pad->gpio_idx = i + PMIC_GPIO_PHYSICAL_OFFSET;
+		pad->irq = platform_get_irq(pdev, pin_idx);
+		if (pad->irq < 0) {
+			dev_err(state->dev,
+				"failed to get irq for gpio %d (pin %d), ret=%d\n",
+					pad->gpio_idx, pin_idx, pad->irq);
+			ret = pad->irq;
+			goto err_free;
+		}
+		/* Every pin is a group */
+		state->gpio_groups[pin_idx] = pmic_gpio_groups[i];
 
-		pad->base = reg + i * PMIC_GPIO_ADDRESS_RANGE;
+		pad->base = start + i * PMIC_GPIO_ADDRESS_RANGE;
 
 		ret = pmic_gpio_populate(state, pad);
-		if (ret < 0)
-			return ret;
+		if (ret < 0) {
+			dev_err(state->dev,
+				"failed to populate gpio %d, ret=%d\n",
+							i, ret);
+			goto err_free;
+		}
+		pindesc++;
+		pin_idx++;
 	}
 
 	state->chip = pmic_gpio_gpio_template;
@@ -1011,25 +1133,29 @@
 	state->chip.can_sleep = false;
 
 	state->ctrl = devm_pinctrl_register(dev, pctrldesc, state);
-	if (IS_ERR(state->ctrl))
-		return PTR_ERR(state->ctrl);
+	if (IS_ERR(state->ctrl)) {
+		ret = PTR_ERR(state->ctrl);
+		dev_err(state->dev, "failed to register pinctrl device, ret=%d\n",
+							ret);
+		goto err_free;
+	}
 
 	ret = gpiochip_add_data(&state->chip, state);
 	if (ret) {
-		dev_err(state->dev, "can't add gpio chip\n");
-		return ret;
+		dev_err(state->dev, "can't add gpio chip, ret=%d\n", ret);
+		goto err_free;
 	}
 
 	ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
 	if (ret) {
-		dev_err(dev, "failed to add pin range\n");
-		goto err_range;
+		dev_err(dev, "failed to add pin range\n, ret=%d\n", ret);
+		gpiochip_remove(&state->chip);
+		goto err_free;
 	}
 
-	return 0;
+err_free:
+	kfree(disallowed);
 
-err_range:
-	gpiochip_remove(&state->chip);
 	return ret;
 }
 
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index fff8966..b7685cb 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -415,6 +415,16 @@
 {
 	uint32_t val;
 
+	/*
+	 * Allocate new events for this channel first,
+	 * before submitting the new TREs.
+	 * For TO_GSI channels the event ring doorbell is rung as part of
+	 * the interrupt handling.
+	 */
+	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
+		gsi_ring_evt_doorbell(ctx->evtr);
+	ctx->ring.wp = ctx->ring.wp_local;
+
 	/* write order MUST be MSB followed by LSB */
 	val = ((ctx->ring.wp_local >> 32) &
 			GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
@@ -470,8 +480,8 @@
 			cntr = 0;
 			rp = gsi_readl(gsi_ctx->base +
 				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
-			rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-				GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(i, ee))) << 32;
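+			/* ring MSBs are fixed; reuse the cached upper 32 bits */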
+			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
 			ctx->ring.rp = rp;
 			while (ctx->ring.rp_local != rp) {
 				++cntr;
@@ -1529,6 +1539,7 @@
 static int gsi_validate_channel_props(struct gsi_chan_props *props)
 {
 	uint64_t ra;
+	uint64_t last;
 
 	if (props->ch_id >= gsi_ctx->max_ch) {
 		GSIERR("ch_id %u invalid\n", props->ch_id);
@@ -1556,6 +1567,17 @@
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
+	last = props->ring_base_addr + props->ring_len - props->re_size;
+
+	/* MSB should stay same within the ring */
+	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
+	    (last & 0xFFFFFFFF00000000ULL)) {
+		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
+			props->ring_base_addr,
+			props->ring_len);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
 	if (props->prot == GSI_CHAN_PROT_GPI &&
 			!props->ring_base_vaddr) {
 		GSIERR("protocol %u requires ring base VA\n", props->prot);
@@ -2128,29 +2150,22 @@
 		uint16_t *num_free_re)
 {
 	uint16_t start;
-	uint16_t start_hw;
 	uint16_t end;
 	uint64_t rp;
-	uint64_t rp_hw;
 	int ee = gsi_ctx->per.ee;
 	uint16_t used;
-	uint16_t used_hw;
-
-	rp_hw = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
-	rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee)))
-		<< 32;
 
 	if (!ctx->evtr) {
-		rp = rp_hw;
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
 		ctx->ring.rp = rp;
 	} else {
 		rp = ctx->ring.rp_local;
 	}
 
 	start = gsi_find_idx_from_addr(&ctx->ring, rp);
-	start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
 	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
 
 	if (end >= start)
@@ -2158,13 +2173,7 @@
 	else
 		used = ctx->ring.max_num_elem + 1 - (start - end);
 
-	if (end >= start_hw)
-		used_hw = end - start_hw;
-	else
-		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end);
-
 	*num_free_re = ctx->ring.max_num_elem - used;
-	gsi_update_ch_dp_stats(ctx, used_hw);
 }
 
 int gsi_query_channel_info(unsigned long chan_hdl,
@@ -2274,14 +2283,12 @@
 
 	rp = gsi_readl(gsi_ctx->base +
 		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
-	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
+	rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
 	ctx->ring.rp = rp;
 
 	wp = gsi_readl(gsi_ctx->base +
 		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
-	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
+	wp |= ctx->ring.wp & 0xFFFFFFFF00000000;
 	ctx->ring.wp = wp;
 
 	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
@@ -2353,6 +2360,8 @@
 			tre.re_type = GSI_RE_XFER;
 		} else if (xfer[i].type == GSI_XFER_ELEM_IMME_CMD) {
 			tre.re_type = GSI_RE_IMMD_CMD;
+		} else if (xfer[i].type == GSI_XFER_ELEM_NOP) {
+			tre.re_type = GSI_RE_NOP;
 		} else {
 			GSIERR("chan_hdl=%lu bad RE type=%u\n", chan_hdl,
 				xfer[i].type);
@@ -2420,6 +2429,9 @@
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
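+	/* nothing new was queued since the last doorbell ring; skip it */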
+	if (ctx->ring.wp == ctx->ring.wp_local)
+		return GSI_STATUS_SUCCESS;
+
 	gsi_ring_chan_doorbell(ctx);
 
 	return GSI_STATUS_SUCCESS;
@@ -2457,19 +2469,22 @@
 	}
 
 	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
-	rp = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
-	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee))) << 32;
-	ctx->evtr->ring.rp = rp;
-	if (rp == ctx->evtr->ring.rp_local) {
+	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
+		/* update rp to see if we have anything new to process */
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
+		ctx->evtr->ring.rp = rp;
+	}
+
+	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
 		spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
 		ctx->stats.poll_empty++;
 		return GSI_STATUS_POLL_EMPTY;
 	}
 
 	gsi_process_evt_re(ctx->evtr, notify, false);
-	gsi_ring_evt_doorbell(ctx->evtr);
 	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
 	ctx->stats.poll_ok++;
 
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index f53a4bd..32fb178 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -209,6 +209,7 @@
 enum gsi_re_type {
 	GSI_RE_XFER = 0x2,
 	GSI_RE_IMMD_CMD = 0x3,
+	GSI_RE_NOP = 0x4,
 };
 
 struct __packed gsi_tre {
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 717c8917..b1d1dfa 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -490,11 +490,6 @@
 		goto error;
 	}
 
-	if (gsi_ctx->chan[ch_id].props.prot == GSI_CHAN_PROT_GPI) {
-		TERR("valid for non GPI channels only\n");
-		goto error;
-	}
-
 	if (gsi_ctx->chan[ch_id].enable_dp_stats == enable) {
 		TERR("ch_%d: already enabled/disabled\n", ch_id);
 		return -EFAULT;
@@ -631,7 +626,7 @@
 	else
 		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end_hw);
 
-	TERR("ch %d used %d\n", ctx->props.ch_id, used_hw);
+	TDBG("ch %d used %d\n", ctx->props.ch_id, used_hw);
 	gsi_update_ch_dp_stats(ctx, used_hw);
 }
 
@@ -641,7 +636,6 @@
 
 	for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
 		if (gsi_ctx->chan[ch_id].allocated &&
-		    gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI &&
 		    gsi_ctx->chan[ch_id].enable_dp_stats)
 			gsi_dbg_update_ch_dp_stats(&gsi_ctx->chan[ch_id]);
 	}
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 6dd371e..d45fa51 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1635,6 +1635,25 @@
 EXPORT_SYMBOL(ipa_get_smem_restr_bytes);
 
 /**
+ * ipa_broadcast_wdi_quota_reach_ind() - broadcast a quota reach indication
+ * @fid: [in] netdev ID
+ * @num_bytes: [in] number of bytes used
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+		uint64_t num_bytes)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_broadcast_wdi_quota_reach_ind,
+		fid, num_bytes);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_broadcast_wdi_quota_reach_ind);
+
+/**
  * ipa_uc_wdi_get_dbpa() - To retrieve
  * doorbell physical address of wlan pipes
  * @param:  [in/out] input/output parameters
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 1b8e3d6..bfe1608 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -183,6 +183,9 @@
 
 	u16 (*ipa_get_smem_restr_bytes)(void);
 
+	int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
+		uint64_t num_bytes);
+
 	int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out);
 
 	int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 20b73d8..7759c98 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2845,6 +2845,13 @@
 		}
 	}
 
+	/* allocate the common PROD event ring */
+	if (ipa3_alloc_common_event_ring()) {
+		IPAERR("ipa3_alloc_common_event_ring failed.\n");
+		result = -EPERM;
+		goto fail_ch20_wa;
+	}
+
 	/* CMD OUT (AP->IPA) */
 	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
 	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
@@ -4239,6 +4246,52 @@
 	return 0;
 }
 
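+/*
+ * Pre-build one IP_PACKET_INIT immediate command payload per pipe in a single
+ * DMA buffer so the TX data path can reference it by physical address instead
+ * of constructing the command for every packet.
+ */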
+static int ipa3_alloc_pkt_init(void)
+{
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_ip_packet_init cmd = {0};
+	int i;
+
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+		&cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct IMM cmd\n");
+		return -ENOMEM;
+	}
+
+	mem.size = cmd_pyld->len * ipa3_ctx->ipa_num_pipes;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
+		&mem.phys_base, GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+		ipahal_destroy_imm_cmd(cmd_pyld);
+		return -ENOMEM;
+	}
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+	memset(mem.base, 0, mem.size);
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		cmd.destination_pipe_index = i;
+		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+			&cmd, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct IMM cmd\n");
+			dma_free_coherent(ipa3_ctx->pdev,
+				mem.size,
+				mem.base,
+				mem.phys_base);
+			return -ENOMEM;
+		}
+		memcpy(mem.base + i * cmd_pyld->len, cmd_pyld->data,
+			cmd_pyld->len);
+		ipa3_ctx->pkt_init_imm[i] = mem.phys_base + i * cmd_pyld->len;
+		ipahal_destroy_imm_cmd(cmd_pyld);
+	}
+
+	return 0;
+}
+
 /**
 * ipa3_pre_init() - Initialize the IPA Driver.
 * This part contains all initialization which doesn't require IPA HW, such
@@ -4648,6 +4701,13 @@
 		goto fail_create_apps_resource;
 	}
 
+	result = ipa3_alloc_pkt_init();
+	if (result) {
+		IPAERR("Failed to alloc pkt_init payload\n");
+		result = -ENODEV;
+		goto fail_create_apps_resource;
+	}
+
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
 		ipa3_enable_dcd();
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 9bb3e0e..3e4bd79 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -69,6 +69,9 @@
 #define IPA_GSI_CH_20_WA_VIRT_CHAN 29
 
 #define IPA_DEFAULT_SYS_YELLOW_WM 32
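+/* number of queued RX buffers after which the GSI channel doorbell is rung */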
+#define IPA_REPL_XFER_THRESH 10
+
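+/* delay before a NOP descriptor is sent to trigger TX completions */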
+#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
 
 /*
  * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
@@ -183,106 +186,64 @@
 {
 	struct ipa3_tx_pkt_wrapper *tx_pkt;
 	struct ipa3_sys_context *sys;
+	struct ipa3_tx_pkt_wrapper *this_pkt;
 
 	tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
 	sys = tx_pkt->sys;
-
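+	/*
+	 * Complete, in order, any earlier descriptors still pending at the
+	 * head of this pipe's list before handling the current one.
+	 */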
+	spin_lock_bh(&sys->spinlock);
+	this_pkt = list_first_entry(&sys->head_desc_list,
+		struct ipa3_tx_pkt_wrapper, link);
+	while (tx_pkt != this_pkt) {
+		spin_unlock_bh(&sys->spinlock);
+		ipa3_wq_write_done_common(sys, this_pkt);
+		spin_lock_bh(&sys->spinlock);
+		this_pkt = list_first_entry(&sys->head_desc_list,
+			struct ipa3_tx_pkt_wrapper, link);
+	}
+	spin_unlock_bh(&sys->spinlock);
 	ipa3_wq_write_done_common(sys, tx_pkt);
 }
 
-/**
- * ipa3_send_one() - Send a single descriptor
- * @sys:	system pipe context
- * @desc:	descriptor to send
- * @in_atomic:  whether caller is in atomic context
- *
- * - Allocate tx_packet wrapper
- * - transfer data to the IPA
- * - after the transfer was done the user will be notified via provided
- *   callback
- *
- * Return codes: 0: success, -EFAULT: failure
- */
-int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
-		bool in_atomic)
+
+static void ipa3_send_nop_desc(struct work_struct *work)
 {
+	struct ipa3_sys_context *sys = container_of(work,
+		struct ipa3_sys_context, work);
+	struct gsi_xfer_elem nop_xfer;
 	struct ipa3_tx_pkt_wrapper *tx_pkt;
-	struct gsi_xfer_elem gsi_xfer;
-	int result;
-	dma_addr_t dma_address;
-	u32 mem_flag = GFP_ATOMIC;
 
-	if (unlikely(!in_atomic))
-		mem_flag = GFP_KERNEL;
-
-	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, mem_flag);
+	IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
+	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
 	if (!tx_pkt) {
 		IPAERR("failed to alloc tx wrapper\n");
-		goto fail_mem_alloc;
-	}
-
-	if (!desc->dma_address_valid) {
-		dma_address = dma_map_single(ipa3_ctx->pdev, desc->pyld,
-			desc->len, DMA_TO_DEVICE);
-	} else {
-		dma_address = desc->dma_address;
-		tx_pkt->no_unmap_dma = true;
-	}
-	if (!dma_address) {
-		IPAERR("failed to DMA wrap\n");
-		goto fail_dma_map;
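+		/* no wrapper available; retry sending the NOP later */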
+		queue_work(sys->wq, &sys->work);
+		return;
 	}
 
 	INIT_LIST_HEAD(&tx_pkt->link);
-	tx_pkt->type = desc->type;
-	tx_pkt->cnt = 1;    /* only 1 desc in this "set" */
-
-	tx_pkt->mem.phys_base = dma_address;
-	tx_pkt->mem.base = desc->pyld;
-	tx_pkt->mem.size = desc->len;
-	tx_pkt->sys = sys;
-	tx_pkt->callback = desc->callback;
-	tx_pkt->user1 = desc->user1;
-	tx_pkt->user2 = desc->user2;
-
-	memset(&gsi_xfer, 0, sizeof(gsi_xfer));
-	gsi_xfer.addr = dma_address;
-	gsi_xfer.flags |= GSI_XFER_FLAG_EOT;
-	gsi_xfer.xfer_user_data = tx_pkt;
-	if (desc->type == IPA_IMM_CMD_DESC) {
-		gsi_xfer.len = desc->opcode;
-		gsi_xfer.type = GSI_XFER_ELEM_IMME_CMD;
-	} else {
-		gsi_xfer.len = desc->len;
-		gsi_xfer.type = GSI_XFER_ELEM_DATA;
-	}
-
+	tx_pkt->cnt = 1;
 	INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
-
+	tx_pkt->no_unmap_dma = true;
+	tx_pkt->sys = sys;
 	spin_lock_bh(&sys->spinlock);
 	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+	spin_unlock_bh(&sys->spinlock);
 
-	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
-				&gsi_xfer, true);
-	if (result != GSI_STATUS_SUCCESS) {
-		IPAERR("GSI xfer failed.\n");
-		goto fail_transport_send;
+	memset(&nop_xfer, 0, sizeof(nop_xfer));
+	nop_xfer.type = GSI_XFER_ELEM_NOP;
+	nop_xfer.flags = GSI_XFER_FLAG_EOT;
+	nop_xfer.xfer_user_data = tx_pkt;
+	if (gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1, &nop_xfer, true)) {
+		IPAERR("gsi_queue_xfer for ch:%lu failed\n",
+			sys->ep->gsi_chan_hdl);
+		queue_work(sys->wq, &sys->work);
+		return;
 	}
+	sys->len_pending_xfer = 0;
 
-	spin_unlock_bh(&sys->spinlock);
-
-	return 0;
-
-fail_transport_send:
-	list_del(&tx_pkt->link);
-	spin_unlock_bh(&sys->spinlock);
-	dma_unmap_single(ipa3_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
-fail_dma_map:
-	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
-fail_mem_alloc:
-	return -EFAULT;
 }
 
+
 /**
  * ipa3_send() - Send multiple descriptors in one HW transaction
  * @sys: system pipe context
@@ -437,19 +398,21 @@
 		}
 
 		if (i == (num_desc - 1)) {
-			gsi_xfer_elem_array[i].flags |=
-				GSI_XFER_FLAG_EOT;
-			if (sys->ep->client == IPA_CLIENT_APPS_WAN_PROD
-				&& sys->policy == IPA_POLICY_INTR_MODE)
+			if (!sys->use_comm_evt_ring) {
+				gsi_xfer_elem_array[i].flags |=
+					GSI_XFER_FLAG_EOT;
 				gsi_xfer_elem_array[i].flags |=
 					GSI_XFER_FLAG_BEI;
+			}
 			gsi_xfer_elem_array[i].xfer_user_data =
 				tx_pkt_first;
-		} else
-			gsi_xfer_elem_array[i].flags |=
-				GSI_XFER_FLAG_CHAIN;
+		} else {
+			gsi_xfer_elem_array[i].flags |=
+				GSI_XFER_FLAG_CHAIN;
+		}
 	}
 
+	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
 	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
 			gsi_xfer_elem_array, true);
 	if (result != GSI_STATUS_SUCCESS) {
@@ -458,7 +421,18 @@
 	}
 	kfree(gsi_xfer_elem_array);
 
 	spin_unlock_bh(&sys->spinlock);
+
+	/* set the timer for sending the NOP descriptor */
+	if (sys->use_comm_evt_ring && !hrtimer_active(&sys->db_timer)) {
+		ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);
+
+		IPADBG_LOW("scheduling timer for ch %lu\n",
+			sys->ep->gsi_chan_hdl);
+		hrtimer_start(&sys->db_timer, time, HRTIMER_MODE_REL);
+	}
+
 	return 0;
 
 failure:
@@ -491,6 +465,25 @@
 }
 
 /**
+ * ipa3_send_one() - Send a single descriptor
+ * @sys:	system pipe context
+ * @desc:	descriptor to send
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * - Allocate tx_packet wrapper
+ * - transfer data to the IPA
+ * - after the transfer is done, the sending user is notified via the
+ *   provided callback
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+	bool in_atomic)
+{
+	return ipa3_send(sys, 1, desc, in_atomic);
+}
+
+/**
  * ipa3_transport_irq_cmd_ack - callback function which will be called by
  * the transport driver after an immediate command is complete.
  * @user1:	pointer to the descriptor of the transfer
@@ -771,15 +764,14 @@
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	do {
 		cnt = ipa3_handle_rx_core(sys, true, true);
-		if (cnt == 0) {
+		if (cnt == 0)
 			inactive_cycles++;
-			trace_idle_sleep_enter3(sys->ep->client);
-			usleep_range(POLLING_MIN_SLEEP_RX,
-					POLLING_MAX_SLEEP_RX);
-			trace_idle_sleep_exit3(sys->ep->client);
-		} else {
+		else
 			inactive_cycles = 0;
-		}
+
+		trace_idle_sleep_enter3(sys->ep->client);
+		usleep_range(POLLING_MIN_SLEEP_RX, POLLING_MAX_SLEEP_RX);
+		trace_idle_sleep_exit3(sys->ep->client);
 	} while (inactive_cycles <= POLLING_INACTIVITY_RX);
 
 	trace_poll_to_intr3(sys->ep->client);
@@ -808,6 +800,15 @@
 		ipa3_handle_rx(sys);
 }
 
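+/* doorbell timer expiry: queue the work that sends the doorbell NOP descriptor */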
+enum hrtimer_restart ipa3_ring_doorbell_timer_fn(struct hrtimer *param)
+{
+	struct ipa3_sys_context *sys = container_of(param,
+		struct ipa3_sys_context, db_timer);
+
+	queue_work(sys->wq, &sys->work);
+	return HRTIMER_NORESTART;
+}
+
 /**
  * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
  * IPA EP configuration
@@ -889,6 +890,9 @@
 		INIT_LIST_HEAD(&ep->sys->head_desc_list);
 		INIT_LIST_HEAD(&ep->sys->rcycl_list);
 		spin_lock_init(&ep->sys->spinlock);
+		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL);
+		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
 	} else {
 		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
 	}
@@ -1071,7 +1075,10 @@
 	}
 
 	/* free event ring only when it is present */
-	if (ep->gsi_evt_ring_hdl != ~0) {
+	if (ep->sys->use_comm_evt_ring) {
+		ipa3_ctx->gsi_evt_comm_ring_rem +=
+			ep->gsi_mem_info.chan_ring_len;
+	} else if (ep->gsi_evt_ring_hdl != ~0) {
 		result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
 		if (result != GSI_STATUS_SUCCESS) {
 			IPAERR("Failed to reset evt ring: %d.\n",
@@ -1145,7 +1152,7 @@
 		dev_kfree_skb_any(skb);
 }
 
-static void ipa3_tx_cmd_comp(void *user1, int user2)
+void ipa3_tx_cmd_comp(void *user1, int user2)
 {
 	ipahal_destroy_imm_cmd(user1);
 }
@@ -1180,7 +1187,6 @@
 	struct ipa3_desc *desc;
 	struct ipa3_desc _desc[3];
 	int dst_ep_idx;
-	struct ipahal_imm_cmd_ip_packet_init cmd;
 	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
 	struct ipa3_sys_context *sys;
 	int src_ep_idx;
@@ -1267,54 +1273,58 @@
 
 	if (dst_ep_idx != -1) {
 		/* SW data path */
-		cmd.destination_pipe_index = dst_ep_idx;
-		cmd_pyld = ipahal_construct_imm_cmd(
-			IPA_IMM_CMD_IP_PACKET_INIT, &cmd, true);
-		if (unlikely(!cmd_pyld)) {
-			IPAERR("failed to construct ip_packet_init imm cmd\n");
-			goto fail_mem;
+		data_idx = 0;
+		if (sys->policy == IPA_POLICY_NOINTR_MODE) {
+			/*
+			 * For non-interrupt mode channel (where there is no
+			 * event ring) a TAG STATUS command is used for completion
+			 * notification. IPA will generate a status packet with
+			 * tag info as a result of the TAG STATUS command.
+			 */
+			desc[data_idx].opcode =
+				ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+			desc[data_idx].type = IPA_IMM_CMD_DESC;
+			desc[data_idx].callback = ipa3_tag_destroy_imm;
+			data_idx++;
 		}
-
-		/* the tag field will be populated in ipa3_send() function */
-		desc[0].opcode = ipahal_imm_cmd_get_opcode(
-			IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
-		desc[0].type = IPA_IMM_CMD_DESC;
-		desc[0].callback = ipa3_tag_destroy_imm;
-		desc[1].opcode =
+		desc[data_idx].opcode =
 			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
-		desc[1].pyld = cmd_pyld->data;
-		desc[1].len = cmd_pyld->len;
-		desc[1].type = IPA_IMM_CMD_DESC;
-		desc[1].callback = ipa3_tx_cmd_comp;
-		desc[1].user1 = cmd_pyld;
-		desc[2].pyld = skb->data;
-		desc[2].len = skb_headlen(skb);
-		desc[2].type = IPA_DATA_DESC_SKB;
-		desc[2].callback = ipa3_tx_comp_usr_notify_release;
-		desc[2].user1 = skb;
-		desc[2].user2 = (meta && meta->pkt_init_dst_ep_valid &&
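+		/* reference the pre-built IP_PACKET_INIT command for this pipe */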
+		desc[data_idx].dma_address_valid = true;
+		desc[data_idx].dma_address = ipa3_ctx->pkt_init_imm[dst_ep_idx];
+		desc[data_idx].type = IPA_IMM_CMD_DESC;
+		desc[data_idx].callback = NULL;
+		data_idx++;
+		desc[data_idx].pyld = skb->data;
+		desc[data_idx].len = skb_headlen(skb);
+		desc[data_idx].type = IPA_DATA_DESC_SKB;
+		desc[data_idx].callback = ipa3_tx_comp_usr_notify_release;
+		desc[data_idx].user1 = skb;
+		desc[data_idx].user2 = (meta && meta->pkt_init_dst_ep_valid &&
 				meta->pkt_init_dst_ep_remote) ?
 				src_ep_idx :
 				dst_ep_idx;
 		if (meta && meta->dma_address_valid) {
-			desc[2].dma_address_valid = true;
-			desc[2].dma_address = meta->dma_address;
+			desc[data_idx].dma_address_valid = true;
+			desc[data_idx].dma_address = meta->dma_address;
 		}
+		data_idx++;
 
 		for (f = 0; f < num_frags; f++) {
-			desc[3+f].frag = &skb_shinfo(skb)->frags[f];
-			desc[3+f].type = IPA_DATA_DESC_SKB_PAGED;
-			desc[3+f].len = skb_frag_size(desc[3+f].frag);
+			desc[data_idx + f].frag = &skb_shinfo(skb)->frags[f];
+			desc[data_idx + f].type = IPA_DATA_DESC_SKB_PAGED;
+			desc[data_idx + f].len =
+				skb_frag_size(desc[data_idx + f].frag);
 		}
 		/* don't free skb till frag mappings are released */
 		if (num_frags) {
-			desc[3+f-1].callback = desc[2].callback;
-			desc[3+f-1].user1 = desc[2].user1;
-			desc[3+f-1].user2 = desc[2].user2;
-			desc[2].callback = NULL;
+			desc[data_idx + f - 1].callback =
+				desc[data_idx - 1].callback;
+			desc[data_idx + f - 1].user1 = desc[data_idx - 1].user1;
+			desc[data_idx + f - 1].user2 = desc[data_idx - 1].user2;
+			desc[data_idx - 1].callback = NULL;
 		}
 
-		if (ipa3_send(sys, num_frags + 3, desc, true)) {
+		if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
 			IPAERR("fail to send skb %p num_frags %u SWP\n",
 				skb, num_frags);
 			goto fail_send;
@@ -1699,12 +1709,21 @@
 		gsi_xfer_elem_one.xfer_user_data = rx_pkt;
 
 		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
-				1, &gsi_xfer_elem_one, true);
+				1, &gsi_xfer_elem_one, false);
 		if (ret != GSI_STATUS_SUCCESS) {
 			IPAERR("failed to provide buffer: %d\n",
 				ret);
 			goto fail_provide_rx_buffer;
 		}
+
+		/*
+		 * As ringing the doorbell is a costly operation, notify GSI
+		 * of the new buffers only once the threshold is exceeded
+		 */
+		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+			sys->len_pending_xfer = 0;
+			gsi_start_xfer(sys->ep->gsi_chan_hdl);
+		}
 	}
 
 	return;
@@ -1719,7 +1738,7 @@
 fail_skb_alloc:
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 fail_kmem_cache_alloc:
-	if (rx_len_cached == 0)
+	if (rx_len_cached - sys->len_pending_xfer == 0)
 		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
 				msecs_to_jiffies(1));
 }
@@ -1794,12 +1813,21 @@
 		gsi_xfer_elem_one.xfer_user_data = rx_pkt;
 
 		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
-				1, &gsi_xfer_elem_one, true);
+				1, &gsi_xfer_elem_one, false);
 		if (ret != GSI_STATUS_SUCCESS) {
 			IPAERR("failed to provide buffer: %d\n",
 				ret);
 			goto fail_provide_rx_buffer;
 		}
+
+		/*
+		 * As ringing the doorbell is a costly operation, notify GSI
+		 * of the new buffers only once the threshold is exceeded
+		 */
+		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+			sys->len_pending_xfer = 0;
+			gsi_start_xfer(sys->ep->gsi_chan_hdl);
+		}
 	}
 
 	return;
@@ -1815,7 +1843,7 @@
 	INIT_LIST_HEAD(&rx_pkt->link);
 	spin_unlock_bh(&sys->spinlock);
 fail_kmem_cache_alloc:
-	if (rx_len_cached == 0)
+	if (rx_len_cached - sys->len_pending_xfer == 0)
 		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
 		msecs_to_jiffies(1));
 }
@@ -1848,12 +1876,22 @@
 		gsi_xfer_elem_one.xfer_user_data = rx_pkt;
 
 		ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
-			&gsi_xfer_elem_one, true);
+			&gsi_xfer_elem_one, false);
 		if (ret != GSI_STATUS_SUCCESS) {
 			IPAERR("failed to provide buffer: %d\n",
 				ret);
 			break;
 		}
+
+		/*
+		 * As ringing the doorbell is a costly operation, notify GSI
+		 * of the new buffers only once the threshold is exceeded
+		 */
+		if (++sys->len_pending_xfer >= IPA_REPL_XFER_THRESH) {
+			sys->len_pending_xfer = 0;
+			gsi_start_xfer(sys->ep->gsi_chan_hdl);
+		}
+
 		rx_len_cached = ++sys->len;
 		curr = (curr + 1) % sys->repl.capacity;
 		/* ensure write is done before setting head index */
@@ -1863,7 +1901,8 @@
 
 	queue_work(sys->repl_wq, &sys->repl_work);
 
-	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+	if (rx_len_cached - sys->len_pending_xfer
+		<= IPA_DEFAULT_SYS_YELLOW_WM) {
 		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
 		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
@@ -2641,6 +2680,7 @@
 	if (in->client == IPA_CLIENT_APPS_CMD_PROD ||
 		in->client == IPA_CLIENT_APPS_WAN_PROD) {
 		sys->policy = IPA_POLICY_INTR_MODE;
+		sys->use_comm_evt_ring = false;
 		return 0;
 	}
 
@@ -2652,12 +2692,12 @@
 	if (IPA_CLIENT_IS_PROD(in->client)) {
 		if (sys->ep->skip_ep_cfg) {
 			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->use_comm_evt_ring = true;
 			atomic_set(&sys->curr_polling_state, 0);
 		} else {
-			sys->policy = IPA_POLICY_NOINTR_MODE;
-			sys->ep->status.status_en = true;
-			sys->ep->status.status_ep = ipa3_get_ep_mapping(
-					IPA_CLIENT_APPS_LAN_CONS);
+			sys->policy = IPA_POLICY_INTR_MODE;
+			sys->use_comm_evt_ring = true;
+			INIT_WORK(&sys->work, ipa3_send_nop_desc);
 		}
 	} else {
 		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
@@ -3325,6 +3365,46 @@
 	}
 }
 
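+/*
+ * Allocate the event ring shared by producer pipes that use the common event
+ * ring; gsi_evt_comm_ring_rem tracks how much of it is still unclaimed when
+ * channels are set up.
+ */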
+int ipa3_alloc_common_event_ring(void)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	dma_addr_t evt_dma_addr;
+	int result;
+
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+
+	gsi_evt_ring_props.ring_len = IPA_COMMON_EVENT_RING_SIZE;
+
+	gsi_evt_ring_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev,
+		gsi_evt_ring_props.ring_len, &evt_dma_addr, GFP_KERNEL);
+	if (!gsi_evt_ring_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_evt_ring_props.ring_len);
+		return -ENOMEM;
+	}
+	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
+	gsi_evt_ring_props.int_modt = 0;
+	gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel */
+	gsi_evt_ring_props.rp_update_addr = 0;
+	gsi_evt_ring_props.exclusive = false;
+	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
+	gsi_evt_ring_props.user_data = NULL;
+
+	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+		ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
+	if (result) {
+		IPAERR("gsi_alloc_evt_ring failed %d\n", result);
+		return result;
+	}
+	ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;
+
+	return 0;
+}
+
 static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
 	struct ipa3_ep_context *ep)
 {
@@ -3344,11 +3424,18 @@
 	evt_dma_addr = 0;
 	ep->gsi_evt_ring_hdl = ~0;
 	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
-	/*
-	 * allocate event ring for all interrupt-policy
-	 * pipes and IPA consumers pipes
-	 */
-	if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
+	if (ep->sys->use_comm_evt_ring) {
+		if (ipa3_ctx->gsi_evt_comm_ring_rem < 2 * in->desc_fifo_sz) {
+			IPAERR("not enough space in common event ring\n");
+			IPAERR("available: %d needed: %d\n",
+				ipa3_ctx->gsi_evt_comm_ring_rem,
+				2 * in->desc_fifo_sz);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		ipa3_ctx->gsi_evt_comm_ring_rem -= (2 * in->desc_fifo_sz);
+		ep->gsi_evt_ring_hdl = ipa3_ctx->gsi_evt_comm_hdl;
+	} else if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
 	     IPA_CLIENT_IS_CONS(ep->client)) {
 		gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
 		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
@@ -3375,10 +3462,7 @@
 			gsi_evt_ring_props.ring_base_vaddr;
 
 		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
-		if (ep->client == IPA_CLIENT_APPS_WAN_PROD)
-			gsi_evt_ring_props.int_modc = 248;
-		else
-			gsi_evt_ring_props.int_modc = 1;
+		gsi_evt_ring_props.int_modc = 1;
 
 		IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
 			ep->client,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 244c80c..7419a64 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -45,6 +45,7 @@
 #define IPA3_MAX_NUM_PIPES 31
 #define IPA_SYS_DESC_FIFO_SZ 0x800
 #define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_COMMON_EVENT_RING_SIZE 0x7C00
 #define IPA_LAN_RX_HEADER_LENGTH (2)
 #define IPA_QMAP_HEADER_LENGTH (4)
 #define IPA_DL_CHECKSUM_LENGTH (8)
@@ -591,9 +592,11 @@
  */
 struct ipa3_sys_context {
 	u32 len;
+	u32 len_pending_xfer;
 	atomic_t curr_polling_state;
 	struct delayed_work switch_to_intr_work;
 	enum ipa3_sys_pipe_policy policy;
+	bool use_comm_evt_ring;
 	int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
 	struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
 	void (*free_skb)(struct sk_buff *skb);
@@ -616,6 +619,7 @@
 	struct list_head head_desc_list;
 	struct list_head rcycl_list;
 	spinlock_t spinlock;
+	struct hrtimer db_timer;
 	struct workqueue_struct *wq;
 	struct workqueue_struct *repl_wq;
 	struct ipa3_status_stats *status_stat;
@@ -702,6 +706,7 @@
  * @user1: cookie1 for above callback
  * @user2: cookie2 for above callback
  * @xfer_done: completion object for sync completion
+ * @skip_db_ring: specifies whether the GSI doorbell should not be rung
  */
 struct ipa3_desc {
 	enum ipa3_desc_type type;
@@ -715,6 +720,7 @@
 	void *user1;
 	int user2;
 	struct completion xfer_done;
+	bool skip_db_ring;
 };
 
 /**
@@ -937,6 +943,10 @@
 	struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
 	void *priv;
 	ipa_uc_ready_cb uc_ready_cb;
+	/* for AP+STA stats update */
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	ipa_wdi_meter_notifier_cb stats_notify;
+#endif
 };
 
 /**
@@ -1129,6 +1139,8 @@
 	struct workqueue_struct *transport_power_mgmt_wq;
 	bool tag_process_before_gating;
 	struct ipa3_transport_pm transport_pm;
+	unsigned long gsi_evt_comm_hdl;
+	u32 gsi_evt_comm_ring_rem;
 	u32 clnt_hdl_cmd;
 	u32 clnt_hdl_data_in;
 	u32 clnt_hdl_data_out;
@@ -1161,6 +1173,7 @@
 	u32 curr_ipa_clk_rate;
 	bool q6_proxy_clk_vote_valid;
 	u32 ipa_num_pipes;
+	dma_addr_t pkt_init_imm[IPA3_MAX_NUM_PIPES];
 
 	struct ipa3_wlan_comm_memb wc_memb;
 
@@ -1611,6 +1624,7 @@
 int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
 int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
 u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
 int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
 		ipa_notify_cb notify, void *priv, u8 hdr_len,
 		struct ipa_ntn_conn_out_params *outp);
@@ -1651,6 +1665,9 @@
 
 bool ipa3_get_client_uplink(int pipe_idx);
 
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
+
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
 /*
  * IPADMA
  */
@@ -1963,4 +1980,5 @@
 struct device *ipa3_get_pdev(void);
 void ipa3_enable_dcd(void);
 void ipa3_disable_prefetch(enum ipa_client_type client);
+int ipa3_alloc_common_event_ring(void);
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 343901f..19c3de4a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -884,7 +884,8 @@
 		IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
 			  qmi_ind.apn.mux_id,
 			  (unsigned long int) qmi_ind.apn.num_Mbytes);
-		ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+		ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
+			IPA_UPSTEAM_MODEM);
 	}
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index 3659a22..4fde261 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -181,7 +181,8 @@
 
 int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
 
-void ipa3_broadcast_quota_reach_ind(uint32_t mux_id);
+void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+	enum ipa_upstream_type upstream_type);
 
 int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
 	*data);
@@ -189,6 +190,8 @@
 int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
 	bool reset);
 
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+
 int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
 	struct ipa_get_data_stats_resp_msg_v01 *resp);
 
@@ -283,7 +286,8 @@
 	return -EPERM;
 }
 
-static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id) { }
+static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
+	enum ipa_upstream_type upstream_type) { }
 
 static inline int ipa3_qmi_get_data_stats(
 	struct ipa_get_data_stats_req_msg_v01 *req,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 8f87baf..c69a3d0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -13,6 +13,7 @@
 #include <linux/dmapool.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include "ipa_qmi_service.h"
 
 #define IPA_HOLB_TMR_DIS 0x0
 
@@ -1190,6 +1191,12 @@
 	ep->client_notify = in->sys.notify;
 	ep->priv = in->sys.priv;
 
+	/* for AP+STA stats update */
+	if (in->wdi_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify;
+	else
+		IPADBG("in->wdi_notify is null\n");
+
 	if (!ep->skip_ep_cfg) {
 		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
 			IPAERR("fail to configure EP.\n");
@@ -1281,6 +1288,12 @@
 
 	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
 
+	/* for AP+STA stats update */
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify)
+		ipa3_ctx->uc_wdi_ctx.stats_notify = NULL;
+	else
+		IPADBG("uc_wdi_ctx.stats_notify already null\n");
+
 uc_timeout:
 	return result;
 }
@@ -1626,6 +1639,23 @@
 	return result;
 }
 
+/**
+ * ipa3_broadcast_wdi_quota_reach_ind() - broadcast a quota reach indication
+ * @fid: [in] netdev ID
+ * @num_bytes: [in] number of bytes used
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid,
+	uint64_t num_bytes)
+{
+	IPAERR("Quota reached indication on fid(%d) Mbytes(%lu)\n",
+			  fid,
+			  (unsigned long int) num_bytes);
+	ipa3_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN);
+	return 0;
+}
+
 int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
 {
 	int result = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 0519563..6cfe25d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -335,12 +335,13 @@
 	[IPA_3_0][IPA_CLIENT_APPS_LAN_PROD] = {
 			14, IPA_v3_0_GROUP_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_DDR},
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 11, 8, 16, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_APPS_WAN_PROD] = {
 			3, IPA_v3_0_GROUP_UL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 14, 11, 8, 16, IPA_EE_AP } },
+			{ 3, 5, 16, 32, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]	  = {
 			22, IPA_v3_0_GROUP_IMM_CMD, false,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
@@ -568,8 +569,8 @@
 	[IPA_3_5][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_APPS_LAN_PROD]   = {
-			8, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			8, IPA_v3_5_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 9, 8, 16, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_APPS_WAN_PROD] = {
@@ -1738,11 +1739,39 @@
 	}
 }
 
+/* ipa3_get_wlan_stats() - get ipa wifi stats
+ *
+ * Return value: success or failure
+ */
+int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
+			wdi_sap_stats);
+	} else {
+		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
+{
+	if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+		ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
+			wdi_quota);
+	} else {
+		IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
 /**
  * ipa3_get_client() - provide client mapping
  * @client: client type
  *
- * Return value: none
+ * Return value: client mapping enum
  */
 enum ipacm_client_enum ipa3_get_client(int pipe_idx)
 {
@@ -3742,6 +3771,8 @@
 	api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
 	api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
 	api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
+	api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
+			ipa3_broadcast_wdi_quota_reach_ind;
 	api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
 	api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
 	api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index d747771..cf9775b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -52,6 +52,7 @@
 #define DEFAULT_OUTSTANDING_LOW 64
 
 #define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
 
 #define IPA_WWAN_RX_SOFTIRQ_THRESH 16
 
@@ -781,6 +782,22 @@
 	return MAX_NUM_OF_MUX_CHANNEL;
 }
 
+static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+					upstreamIface) == 0)
+			return IPA_UPSTEAM_MODEM;
+	}
+
+	if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
+		return IPA_UPSTEAM_WLAN;
+	else
+		return IPA_UPSTEAM_MAX;
+}
+
 static int ipa3_wwan_register_to_ipa(int index)
 {
 	struct ipa_tx_intf tx_properties = {0};
@@ -2627,10 +2644,10 @@
 }
 
 /**
- * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
  * @data - IOCTL data
  *
- * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
  * It translates the given interface name to the Modem MUX ID and
  * sends the request of the quota to the IPA Modem driver via QMI.
  *
@@ -2639,12 +2656,17 @@
  * -EFAULT: Invalid interface name provided
  * other: See ipa_qmi_set_data_quota
  */
-int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+static int rmnet_ipa3_set_data_quota_modem(
+	struct wan_ioctl_set_data_quota *data)
 {
 	u32 mux_id;
 	int index;
 	struct ipa_set_data_usage_quota_req_msg_v01 req;
 
+	/* stop quota */
+	if (!data->set_quota)
+		ipa3_qmi_stop_data_qouta();
+
 	index = find_vchannel_name_index(data->interface_name);
 	IPAWANERR("iface name %s, quota %lu\n",
 		  data->interface_name,
@@ -2668,6 +2690,64 @@
 	return ipa3_qmi_set_data_quota(&req);
 }
 
+static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
+{
+	struct ipa_set_wifi_quota wifi_quota;
+	int rc = 0;
+
+	memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
+	wifi_quota.set_quota = data->set_quota;
+	wifi_quota.quota_bytes = data->quota_mbytes;
+	IPAWANERR("iface name %s, quota %lu\n",
+		  data->interface_name,
+		  (unsigned long int) data->quota_mbytes);
+
+	rc = ipa3_set_wlan_quota(&wifi_quota);
+	/* check if wlan-fw takes this quota-set */
+	if (!wifi_quota.set_valid)
+		rc = -EFAULT;
+	return rc;
+}
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the request of the quota to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->interface_name);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR("Wrong interface_name name %s\n",
+			data->interface_name);
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		rc = rmnet_ipa3_set_data_quota_wifi(data);
+		if (rc) {
+			IPAWANERR("set quota on wifi failed\n");
+			return rc;
+		}
+	} else {
+		rc = rmnet_ipa3_set_data_quota_modem(data);
+		if (rc) {
+			IPAWANERR("set quota on modem failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
  /* rmnet_ipa_set_tether_client_pipe() -
  * @data - IOCTL data
  *
@@ -2732,8 +2812,61 @@
 	return 0;
 }
 
-int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
-	bool reset)
+static int rmnet_ipa3_query_tethering_stats_wifi(
+	struct wan_ioctl_query_tether_stats *data, bool reset)
+{
+	struct ipa_get_wdi_sap_stats *sap_stats;
+	int rc;
+
+	sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
+			GFP_KERNEL);
+	if (!sap_stats) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return -ENOMEM;
+	}
+	memset(sap_stats, 0, sizeof(struct ipa_get_wdi_sap_stats));
+
+	sap_stats->reset_stats = reset;
+	IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
+
+	rc = ipa3_get_wlan_stats(sap_stats);
+	if (rc) {
+		IPAWANERR("can't get ipa3_get_wlan_stats\n");
+		kfree(sap_stats);
+		return rc;
+	} else if (reset) {
+		kfree(sap_stats);
+		return 0;
+	}
+
+	if (sap_stats->stats_valid) {
+		data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
+		data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
+		data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
+		data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
+		data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
+		data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
+		data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
+		data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
+	}
+
+	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+		(unsigned long int) data->ipv4_rx_packets,
+		(unsigned long int) data->ipv6_rx_packets,
+		(unsigned long int) data->ipv4_rx_bytes,
+		(unsigned long int) data->ipv6_rx_bytes);
+	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+		(unsigned long int) data->ipv4_tx_packets,
+		(unsigned long  int) data->ipv6_tx_packets,
+		(unsigned long int) data->ipv4_tx_bytes,
+		(unsigned long int) data->ipv6_tx_bytes);
+
+	kfree(sap_stats);
+	return rc;
+}
+
+static int rmnet_ipa3_query_tethering_stats_modem(
+	struct wan_ioctl_query_tether_stats *data, bool reset)
 {
 	struct ipa_get_data_stats_req_msg_v01 *req;
 	struct ipa_get_data_stats_resp_msg_v01 *resp;
@@ -2820,7 +2953,7 @@
 			}
 		}
 	}
-	IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
 		(unsigned long int) data->ipv4_rx_packets,
 		(unsigned long int) data->ipv6_rx_packets,
 		(unsigned long int) data->ipv4_rx_bytes,
@@ -2870,7 +3003,7 @@
 			}
 		}
 	}
-	IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
 		(unsigned long int) data->ipv4_tx_packets,
 		(unsigned long  int) data->ipv6_tx_packets,
 		(unsigned long int) data->ipv4_tx_bytes,
@@ -2880,6 +3013,69 @@
 	return 0;
 }
 
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->upstreamIface);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR(" Wrong upstreamIface name %s\n",
+			data->upstreamIface);
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		IPAWANDBG_LOW(" query wifi-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_wifi(
+			data, false);
+		if (rc) {
+			IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+			return rc;
+		}
+	} else {
+		IPAWANDBG_LOW(" query modem-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_modem(
+			data, false);
+		if (rc) {
+			IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
+int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
+{
+	enum ipa_upstream_type upstream_type;
+	int rc = 0;
+
+	/* get IPA backhaul type */
+	upstream_type = find_upstream_type(data->upstreamIface);
+
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR(" Wrong upstreamIface name %s\n",
+			data->upstreamIface);
+	} else if (upstream_type == IPA_UPSTEAM_WLAN) {
+		IPAWANERR(" reset wifi-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_wifi(
+			NULL, true);
+		if (rc) {
+			IPAWANERR("reset WLAN stats failed\n");
+			return rc;
+		}
+	} else {
+		IPAWANERR(" reset modem-backhaul stats\n");
+		rc = rmnet_ipa3_query_tethering_stats_modem(
+			NULL, true);
+		if (rc) {
+			IPAWANERR("reset MODEM stats failed\n");
+			return rc;
+		}
+	}
+	return rc;
+}
+
 /**
  * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
  * @mux_id - The MUX ID on which the quota has been reached
@@ -2889,23 +3085,28 @@
  * on the specific interface which matches the mux_id has been reached.
  *
  */
-void ipa3_broadcast_quota_reach_ind(u32 mux_id)
+void ipa3_broadcast_quota_reach_ind(u32 mux_id,
+	enum ipa_upstream_type upstream_type)
 {
 	char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
 	char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
 	char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
 	char *envp[IPA_UEVENT_NUM_EVNP] = {
-		alert_msg, iface_name_l, iface_name_m, NULL };
+		alert_msg, iface_name_l, iface_name_m, NULL};
 	int res;
 	int index;
 
-	index = ipa3_find_mux_channel_index(mux_id);
-
-	if (index == MAX_NUM_OF_MUX_CHANNEL) {
-		IPAWANERR("%u is an mux ID\n", mux_id);
+	/* check upstream_type */
+	if (upstream_type == IPA_UPSTEAM_MAX) {
+		IPAWANERR(" Wrong upstreamIface type %d\n", upstream_type);
 		return;
+	} else if (upstream_type == IPA_UPSTEAM_MODEM) {
+		index = ipa3_find_mux_channel_index(mux_id);
+		if (index == MAX_NUM_OF_MUX_CHANNEL) {
+			IPAWANERR("%u is an mux ID\n", mux_id);
+			return;
+		}
 	}
-
 	res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
 			"ALERT_NAME=%s", "quotaReachedAlert");
 	if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
@@ -2913,15 +3114,25 @@
 		return;
 	}
 	/* posting msg for L-release for CNE */
+	if (upstream_type == IPA_UPSTEAM_MODEM) {
 	res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
 	    "UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	} else {
+		res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+			"UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+	}
 	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
 		IPAWANERR("message too long (%d)", res);
 		return;
 	}
 	/* posting msg for M-release for CNE */
+	if (upstream_type == IPA_UPSTEAM_MODEM) {
 	res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
 	    "INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	} else {
+		res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+			"INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+	}
 	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
 		IPAWANERR("message too long (%d)", res);
 		return;
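
For context, the quota-reached notification assembled above is delivered to
userspace as KEY=VALUE environment strings (ALERT_NAME, UPSTREAM, INTERFACE).
The sketch below is illustrative only and is not part of this patch: a minimal
userspace listener, assuming the driver emits the event via
kobject_uevent_env() over the standard NETLINK_KOBJECT_UEVENT transport, and
assuming an arbitrary receive buffer size.

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	int main(void)
	{
		struct sockaddr_nl addr = {
			.nl_family = AF_NETLINK,
			.nl_groups = 1,	/* kernel uevent multicast group */
		};
		char buf[4096];
		ssize_t len;
		int fd, i;

		fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
		if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			return 1;

		while ((len = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
			buf[len] = '\0';
			/* payload is a series of NUL-separated KEY=VALUE strings */
			for (i = 0; i < len; i += strlen(&buf[i]) + 1) {
				if (!strncmp(&buf[i], "ALERT_NAME=", 11) ||
				    !strncmp(&buf[i], "UPSTREAM=", 9) ||
				    !strncmp(&buf[i], "INTERFACE=", 10))
					printf("%s\n", &buf[i]);
			}
		}
		return 0;
	}
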
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 2abfe17..3ef17f6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -279,8 +279,9 @@
 			break;
 		}
 
-		if (rmnet_ipa3_query_tethering_stats(NULL, true)) {
-			IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+		if (rmnet_ipa3_reset_tethering_stats(
+				(struct wan_ioctl_reset_tether_stats *)param)) {
+			IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n");
 			retval = -EFAULT;
 			break;
 		}
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 76806a0..2d5d9bf 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -511,4 +511,6 @@
 	  This driver provides support for the power supply features of
 	  AXP20x PMIC.
 
+source "drivers/power/supply/qcom/Kconfig"
+
 endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 36c599d..cfbc992 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -71,4 +71,5 @@
 obj-$(CONFIG_CHARGER_TPS65090)	+= tps65090-charger.o
 obj-$(CONFIG_CHARGER_TPS65217)	+= tps65217_charger.o
 obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
+obj-$(CONFIG_ARCH_QCOM)         += qcom/
 obj-$(CONFIG_AXP288_CHARGER)	+= axp288_charger.o
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 1480d9a..f6fa78f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -44,20 +44,23 @@
 					  struct device_attribute *attr,
 					  char *buf) {
 	static char *type_text[] = {
-		"Unknown", "Battery", "UPS", "Mains", "USB",
-		"USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
-		"USB_PD", "USB_PD_DRP"
+		"Unknown", "Battery", "UPS", "Mains", "USB", "USB_DCP",
+		"USB_CDP", "USB_ACA", "USB_HVDCP", "USB_HVDCP_3", "USB_PD",
+		"Wireless", "BMS", "Parallel", "Main", "Wipower",
+		"TYPEC", "TYPEC_UFP", "TYPEC_DFP"
 	};
 	static char *status_text[] = {
 		"Unknown", "Charging", "Discharging", "Not charging", "Full"
 	};
 	static char *charge_type[] = {
-		"Unknown", "N/A", "Trickle", "Fast"
+		"Unknown", "N/A", "Trickle", "Fast",
+		"Taper"
 	};
 	static char *health_text[] = {
 		"Unknown", "Good", "Overheat", "Dead", "Over voltage",
 		"Unspecified failure", "Cold", "Watchdog timer expire",
-		"Safety timer expire"
+		"Safety timer expire",
+		"Warm", "Cool", "Hot"
 	};
 	static char *technology_text[] = {
 		"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
@@ -102,30 +105,48 @@
 	}
 
 	if (off == POWER_SUPPLY_PROP_STATUS)
-		return sprintf(buf, "%s\n", status_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				status_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE)
-		return sprintf(buf, "%s\n", charge_type[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				charge_type[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_HEALTH)
-		return sprintf(buf, "%s\n", health_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				health_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TECHNOLOGY)
-		return sprintf(buf, "%s\n", technology_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				technology_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL)
-		return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				capacity_level_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TYPE)
-		return sprintf(buf, "%s\n", type_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				type_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_SCOPE)
-		return sprintf(buf, "%s\n", scope_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				scope_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TYPEC_MODE)
-		return sprintf(buf, "%s\n", typec_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				typec_text[value.intval]);
 	else if (off == POWER_SUPPLY_PROP_TYPEC_POWER_ROLE)
-		return sprintf(buf, "%s\n", typec_pr_text[value.intval]);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				typec_pr_text[value.intval]);
+	else if (off == POWER_SUPPLY_PROP_DIE_HEALTH)
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				health_text[value.intval]);
+	else if (off == POWER_SUPPLY_PROP_CONNECTOR_HEALTH)
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				health_text[value.intval]);
 	else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
-		return sprintf(buf, "%s\n", value.strval);
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+				value.strval);
 
 	if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT)
-		return sprintf(buf, "%lld\n", value.int64val);
+		return scnprintf(buf, PAGE_SIZE, "%lld\n",
+				value.int64val);
 	else
-		return sprintf(buf, "%d\n", value.intval);
+		return scnprintf(buf, PAGE_SIZE, "%d\n",
+				value.intval);
 }
 
 static ssize_t power_supply_store_property(struct device *dev,
@@ -181,6 +202,8 @@
 	POWER_SUPPLY_ATTR(charge_full),
 	POWER_SUPPLY_ATTR(charge_empty),
 	POWER_SUPPLY_ATTR(charge_now),
+	POWER_SUPPLY_ATTR(charge_now_raw),
+	POWER_SUPPLY_ATTR(charge_now_error),
 	POWER_SUPPLY_ATTR(charge_avg),
 	POWER_SUPPLY_ATTR(charge_counter),
 	POWER_SUPPLY_ATTR(constant_charge_current),
@@ -200,6 +223,7 @@
 	POWER_SUPPLY_ATTR(capacity_alert_min),
 	POWER_SUPPLY_ATTR(capacity_alert_max),
 	POWER_SUPPLY_ATTR(capacity_level),
+	POWER_SUPPLY_ATTR(capacity_raw),
 	POWER_SUPPLY_ATTR(temp),
 	POWER_SUPPLY_ATTR(temp_max),
 	POWER_SUPPLY_ATTR(temp_min),
@@ -216,11 +240,51 @@
 	POWER_SUPPLY_ATTR(scope),
 	POWER_SUPPLY_ATTR(charge_term_current),
 	POWER_SUPPLY_ATTR(calibrate),
-	POWER_SUPPLY_ATTR(resistance),
 	/* Local extensions */
 	POWER_SUPPLY_ATTR(usb_hc),
 	POWER_SUPPLY_ATTR(usb_otg),
-	POWER_SUPPLY_ATTR(charge_enabled),
+	POWER_SUPPLY_ATTR(battery_charging_enabled),
+	POWER_SUPPLY_ATTR(charging_enabled),
+	POWER_SUPPLY_ATTR(step_charging_enabled),
+	POWER_SUPPLY_ATTR(step_charging_step),
+	POWER_SUPPLY_ATTR(pin_enabled),
+	POWER_SUPPLY_ATTR(input_suspend),
+	POWER_SUPPLY_ATTR(input_voltage_regulation),
+	POWER_SUPPLY_ATTR(input_current_max),
+	POWER_SUPPLY_ATTR(input_current_trim),
+	POWER_SUPPLY_ATTR(input_current_settled),
+	POWER_SUPPLY_ATTR(input_voltage_settled),
+	POWER_SUPPLY_ATTR(bypass_vchg_loop_debouncer),
+	POWER_SUPPLY_ATTR(charge_counter_shadow),
+	POWER_SUPPLY_ATTR(hi_power),
+	POWER_SUPPLY_ATTR(low_power),
+	POWER_SUPPLY_ATTR(temp_cool),
+	POWER_SUPPLY_ATTR(temp_warm),
+	POWER_SUPPLY_ATTR(system_temp_level),
+	POWER_SUPPLY_ATTR(resistance),
+	POWER_SUPPLY_ATTR(resistance_capacitive),
+	POWER_SUPPLY_ATTR(resistance_id),
+	POWER_SUPPLY_ATTR(resistance_now),
+	POWER_SUPPLY_ATTR(flash_current_max),
+	POWER_SUPPLY_ATTR(update_now),
+	POWER_SUPPLY_ATTR(esr_count),
+	POWER_SUPPLY_ATTR(buck_freq),
+	POWER_SUPPLY_ATTR(boost_current),
+	POWER_SUPPLY_ATTR(safety_timer_enabled),
+	POWER_SUPPLY_ATTR(charge_done),
+	POWER_SUPPLY_ATTR(flash_active),
+	POWER_SUPPLY_ATTR(flash_trigger),
+	POWER_SUPPLY_ATTR(force_tlim),
+	POWER_SUPPLY_ATTR(dp_dm),
+	POWER_SUPPLY_ATTR(input_current_limited),
+	POWER_SUPPLY_ATTR(input_current_now),
+	POWER_SUPPLY_ATTR(current_qnovo),
+	POWER_SUPPLY_ATTR(voltage_qnovo),
+	POWER_SUPPLY_ATTR(rerun_aicl),
+	POWER_SUPPLY_ATTR(cycle_count_id),
+	POWER_SUPPLY_ATTR(safety_timer_expired),
+	POWER_SUPPLY_ATTR(restricted_charging),
+	POWER_SUPPLY_ATTR(current_capability),
 	POWER_SUPPLY_ATTR(typec_mode),
 	POWER_SUPPLY_ATTR(typec_cc_orientation),
 	POWER_SUPPLY_ATTR(typec_power_role),
@@ -229,16 +293,26 @@
 	POWER_SUPPLY_ATTR(pd_in_hard_reset),
 	POWER_SUPPLY_ATTR(pd_current_max),
 	POWER_SUPPLY_ATTR(pd_usb_suspend_supported),
+	POWER_SUPPLY_ATTR(charger_temp),
+	POWER_SUPPLY_ATTR(charger_temp_max),
+	POWER_SUPPLY_ATTR(parallel_disable),
 	POWER_SUPPLY_ATTR(pe_start),
 	POWER_SUPPLY_ATTR(set_ship_mode),
-	POWER_SUPPLY_ATTR(boost_current),
-	POWER_SUPPLY_ATTR(force_tlim),
+	POWER_SUPPLY_ATTR(soc_reporting_ready),
+	POWER_SUPPLY_ATTR(debug_battery),
+	POWER_SUPPLY_ATTR(fcc_delta),
+	POWER_SUPPLY_ATTR(icl_reduction),
+	POWER_SUPPLY_ATTR(parallel_mode),
+	POWER_SUPPLY_ATTR(die_health),
+	POWER_SUPPLY_ATTR(connector_health),
+	POWER_SUPPLY_ATTR(ctm_current_max),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_ATTR(model_name),
 	POWER_SUPPLY_ATTR(manufacturer),
 	POWER_SUPPLY_ATTR(serial_number),
+	POWER_SUPPLY_ATTR(battery_type),
 };
 
 static struct attribute *
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
new file mode 100644
index 0000000..79ea712
--- /dev/null
+++ b/drivers/power/supply/qcom/Kconfig
@@ -0,0 +1,66 @@
+menu "Qualcomm Technologies Inc Charger and Fuel Gauge support"
+
+config QPNP_FG_GEN3
+	tristate "QPNP GEN3 fuel gauge driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable the GEN3 Fuel Gauge driver. This adds support
+	  for battery fuel gauging and state of charge of battery connected to
+	  the fuel gauge. The state of charge is reported through a BMS power
+	  supply property and also sends uevents when the capacity is updated.
+
+config SMB135X_CHARGER
+	tristate "SMB135X Battery Charger"
+	depends on I2C
+	help
+	  Say Y to include support for SMB135X Battery Charger.
+	  SMB135X is a dual path switching mode charger capable of charging
+	  the battery with up to 3 A of current.
+	  The driver supports charger enable/disable.
+	  The driver reports the charger status via the power supply framework.
+	  A charger status change triggers an IRQ via the device STAT pin.
+
+config SMB1351_USB_CHARGER
+	tristate "smb1351 usb charger (with VBUS detection)"
+	depends on I2C
+	help
+	  Say Y to enable support for the SMB1351 switching mode based charger.
+	  The driver supports charging control (enable/disable) and
+	  charge-current limiting. It also provides USB VBUS detection and
+	  notification support. The driver controls SMB1351 via I2C and
+	  supports the device tree interface.
+
+config QPNP_SMB2
+	tristate "SMB2 Battery Charger"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y to enable support for the SMB2 charging peripheral.
+	  The QPNP SMB2 charger driver supports the charger peripheral
+	  present in the PMICOBALT chip.
+	  The power supply framework is used to communicate battery and
+	  usb properties to userspace and other driver consumers such
+	  as fuel gauge, USB, and USB-PD.
+	  VBUS and VCONN regulators are registered for supporting OTG,
+	  and powered Type-C cables respectively.
+
+config SMB138X_CHARGER
+	tristate "SMB138X Battery Charger"
+	depends on MFD_I2C_PMIC
+	help
+	  Say Y to include support for SMB138X Battery Charger.
+	  SMB1380 is a dual phase 6A battery charger, and SMB1381 is a single
+	  phase 5A battery charger.
+	  The driver supports charger enable/disable.
+	  The driver reports the charger status via the power supply framework.
+	  A charger status change triggers an IRQ via the device STAT pin.
+
+config QPNP_QNOVO
+	bool "QPNP QNOVO driver"
+	depends on MFD_SPMI_PMIC
+	help
+	  Say Y here to enable the Qnovo pulse charging engine. Qnovo driver
+	  accepts pulse parameters via sysfs entries and programs the hardware
+	  module. It also allows userspace code to read diagnostics of voltage
+	  and current measured during certain phases of the pulses.
+
+endmenu
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
new file mode 100644
index 0000000..171444f
--- /dev/null
+++ b/drivers/power/supply/qcom/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_QPNP_FG_GEN3)     += qpnp-fg-gen3.o fg-memif.o fg-util.o
+obj-$(CONFIG_SMB135X_CHARGER)   += smb135x-charger.o pmic-voter.o
+obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o
+obj-$(CONFIG_QPNP_SMB2)		+= qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_SMB138X_CHARGER)	+= smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_QPNP_QNOVO)	+= qpnp-qnovo.o battery.o
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
new file mode 100644
index 0000000..3659b92
--- /dev/null
+++ b/drivers/power/supply/qcom/battery.c
@@ -0,0 +1,936 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/printk.h>
+#include <linux/pm_wakeup.h>
+#include <linux/slab.h>
+#include "pmic-voter.h"
+
+#define DRV_MAJOR_VERSION	1
+#define DRV_MINOR_VERSION	0
+
+#define CHG_STATE_VOTER			"CHG_STATE_VOTER"
+#define TAPER_END_VOTER			"TAPER_END_VOTER"
+#define PL_TAPER_EARLY_BAD_VOTER	"PL_TAPER_EARLY_BAD_VOTER"
+#define PARALLEL_PSY_VOTER		"PARALLEL_PSY_VOTER"
+#define PL_HW_ABSENT_VOTER		"PL_HW_ABSENT_VOTER"
+#define PL_VOTER			"PL_VOTER"
+#define RESTRICT_CHG_VOTER		"RESTRICT_CHG_VOTER"
+
+struct pl_data {
+	int			pl_mode;
+	int			slave_pct;
+	int			taper_pct;
+	int			slave_fcc_ua;
+	int			restricted_current;
+	bool			restricted_charging_enabled;
+	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
+	struct votable		*pl_disable_votable;
+	struct votable		*pl_awake_votable;
+	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct work_struct	status_change_work;
+	struct work_struct	pl_disable_forever_work;
+	struct delayed_work	pl_taper_work;
+	struct power_supply	*main_psy;
+	struct power_supply	*pl_psy;
+	struct power_supply	*batt_psy;
+	int			charge_type;
+	int			main_settled_ua;
+	int			pl_settled_ua;
+	struct class		qcom_batt_class;
+	struct wakeup_source	*pl_ws;
+	struct notifier_block	nb;
+};
+
+struct pl_data *the_chip;
+
+enum print_reason {
+	PR_PARALLEL	= BIT(0),
+};
+
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, 0600);
+
+#define pl_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (debug_mask & (reason))				\
+			pr_info(fmt, ##__VA_ARGS__);	\
+		else							\
+			pr_debug(fmt, ##__VA_ARGS__);		\
+	} while (0)
+
+enum {
+	VER = 0,
+	SLAVE_PCT,
+	RESTRICT_CHG_ENABLE,
+	RESTRICT_CHG_CURRENT,
+};
+
+/*******
+ * ICL *
+ ********/
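+/*
+ * Split the AICL-settled input current between the main and parallel
+ * chargers according to slave_pct; only meaningful for the USBIN-USBIN
+ * parallel modes.
+ */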
+static void split_settled(struct pl_data *chip)
+{
+	int slave_icl_pct;
+	int slave_ua = 0, main_settled_ua = 0;
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	/* TODO some parallel chargers do not have a fine ICL resolution. For
+	 * them implement a psy interface which returns the closest lower ICL
+	 * for desired split
+	 */
+
+	if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
+		&& (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+		return;
+
+	if (!chip->main_psy)
+		return;
+
+	if (!get_effective_result_locked(chip->pl_disable_votable)) {
+		/* read the aicl settled value */
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+		main_settled_ua = pval.intval;
+		/* slave gets 10 percent points less for ICL */
+		slave_icl_pct = max(0, chip->slave_pct - 10);
+		slave_ua = ((main_settled_ua + chip->pl_settled_ua)
+						* slave_icl_pct) / 100;
+	}
+
+	/* ICL_REDUCTION on main could be 0mA when pl is disabled */
+	pval.intval = slave_ua;
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_ICL_REDUCTION, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't change slave suspend state rc=%d\n", rc);
+		return;
+	}
+
+	/* set parallel's ICL  could be 0mA when pl is disabled */
+	pval.intval = slave_ua;
+	rc = power_supply_set_property(chip->pl_psy,
+			POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+		return;
+	}
+
+	/* main_settled_ua represents the total capability of adapter */
+	if (!chip->main_settled_ua)
+		chip->main_settled_ua = main_settled_ua;
+	chip->pl_settled_ua = slave_ua;
+}
+
+static ssize_t version_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+			DRV_MAJOR_VERSION, DRV_MINOR_VERSION);
+}
+
+/*************
+ * SLAVE PCT *
+ **************/
+static ssize_t slave_pct_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->slave_pct);
+}
+
+static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	chip->slave_pct = val;
+	rerun_election(chip->fcc_votable);
+	rerun_election(chip->fv_votable);
+	split_settled(chip);
+
+	return count;
+}
+
+/************************
+ * RESTRICTED CHARGING *
+ ************************/
+static ssize_t restrict_chg_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n",
+			chip->restricted_charging_enabled);
+}
+
+static ssize_t restrict_chg_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	if (chip->restricted_charging_enabled == !!val)
+		goto no_change;
+
+	chip->restricted_charging_enabled = !!val;
+
+	vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled,
+				chip->restricted_current);
+
+no_change:
+	return count;
+}
+
+static ssize_t restrict_cur_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->restricted_current);
+}
+
+static ssize_t restrict_cur_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	chip->restricted_current = val;
+
+	vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled,
+				chip->restricted_current);
+
+	return count;
+}
+
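+/*
+ * Registered through class_register() in pl_init(), these attributes are
+ * exposed under /sys/class/qcom-battery/ as version, parallel_pct,
+ * restricted_charging and restricted_current.
+ */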
+static struct class_attribute pl_attributes[] = {
+	[VER]			= __ATTR_RO(version),
+	[SLAVE_PCT]		= __ATTR(parallel_pct, 0644,
+					slave_pct_show, slave_pct_store),
+	[RESTRICT_CHG_ENABLE]	= __ATTR(restricted_charging, 0644,
+					restrict_chg_show, restrict_chg_store),
+	[RESTRICT_CHG_CURRENT]	= __ATTR(restricted_current, 0644,
+					restrict_cur_show, restrict_cur_store),
+	__ATTR_NULL,
+};
+
+/***********
+ *  TAPER  *
+ ************/
+#define MINIMUM_PARALLEL_FCC_UA		500000
+#define PL_TAPER_WORK_DELAY_MS		100
+#define TAPER_RESIDUAL_PCT		75
+static void pl_taper_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work, struct pl_data,
+						pl_taper_work.work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	/* exit immediately if parallel is disabled */
+	if (get_effective_result(chip->pl_disable_votable)) {
+		pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
+		goto done;
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
+			chip->slave_fcc_ua);
+	if (chip->slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
+		pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
+		goto done;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		goto done;
+	}
+
+	chip->charge_type = pval.intval;
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
+
+		vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+		/* Reduce the taper percent by 25 percent */
+		chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
+		rerun_election(chip->fcc_votable);
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
+				PL_TAPER_WORK_DELAY_MS);
+		schedule_delayed_work(&chip->pl_taper_work,
+				msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
+		return;
+	}
+
+	/*
+	 * Master back to Fast Charge, get out of this round of taper reduction
+	 */
+	pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+
+done:
+	vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
+}
+
+/*********
+ *  FCC  *
+ **********/
+#define EFFICIENCY_PCT	80
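+/*
+ * Distribute the requested total FCC between main and parallel chargers:
+ * the slave share is slave_pct of the request, scaled down by taper_pct
+ * and bounded by a BCL estimate for USBMID-USBMID; the master takes the
+ * remainder, except in USBIN-USBIN with internal rsense where it keeps
+ * the full vote.
+ */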
+static void split_fcc(struct pl_data *chip, int total_ua,
+			int *master_ua, int *slave_ua)
+{
+	int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
+		icl_ua, adapter_uv, bcl_ua;
+	union power_supply_propval pval = {0, };
+
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_FCC_DELTA, &pval);
+	if (rc < 0)
+		hw_cc_delta_ua = 0;
+	else
+		hw_cc_delta_ua = pval.intval;
+
+	bcl_ua = INT_MAX;
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBMID_USBMID) {
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+		icl_ua = pval.intval;
+
+		rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get adaptive voltage rc=%d\n", rc);
+			return;
+		}
+		adapter_uv = pval.intval;
+
+		bcl_ua = div64_s64((s64)icl_ua * adapter_uv * EFFICIENCY_PCT,
+			(s64)get_effective_result(chip->fv_votable) * 100);
+	}
+
+	effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
+	slave_limited_ua = min(effective_total_ua, bcl_ua);
+	*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
+	*slave_ua = (*slave_ua * chip->taper_pct) / 100;
+	/*
+	 * In USBIN_USBIN configuration with internal rsense parallel
+	 * charger's current goes through main charger's BATFET, keep
+	 * the main charger's FCC to the votable result.
+	 */
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		*master_ua = max(0, total_ua);
+	else
+		*master_ua = max(0, total_ua - *slave_ua);
+}
+
+static int pl_fcc_vote_callback(struct votable *votable, void *data,
+			int total_fcc_ua, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+
+	if (total_fcc_ua < 0)
+		return 0;
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (chip->batt_psy) {
+		rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CURRENT_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't get qnovo fcc, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (pval.intval != -EINVAL)
+			total_fcc_ua = pval.intval;
+	}
+
+	if (chip->pl_mode == POWER_SUPPLY_PL_NONE
+	    || get_effective_result_locked(chip->pl_disable_votable)) {
+		pval.intval = total_fcc_ua;
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+		if (rc < 0)
+			pr_err("Couldn't set main fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+	pval.intval = slave_fcc_ua;
+	rc = power_supply_set_property(chip->pl_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->slave_fcc_ua = slave_fcc_ua;
+
+	pval.intval = master_fcc_ua;
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Could not set main fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
+		   master_fcc_ua, slave_fcc_ua,
+		   (master_fcc_ua * 100) / total_fcc_ua,
+		   (slave_fcc_ua * 100) / total_fcc_ua);
+
+	return 0;
+}
+
+#define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
+static int pl_fv_vote_callback(struct votable *votable, void *data,
+			int fv_uv, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+	int effective_fv_uv = fv_uv;
+
+	if (fv_uv < 0)
+		return 0;
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (chip->batt_psy) {
+		rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't get qnovo fv, rc=%d\n", rc);
+			return rc;
+		}
+
+		if (pval.intval != -EINVAL)
+			effective_fv_uv = pval.intval;
+	}
+
+	pval.intval = effective_fv_uv;
+
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set main fv, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
+		pval.intval += PARALLEL_FLOAT_VOLTAGE_DELTA_UV;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set float on parallel rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static void pl_disable_forever_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work,
+			struct pl_data, pl_disable_forever_work);
+
+	/* Disable Parallel charger forever */
+	vote(chip->pl_disable_votable, PL_HW_ABSENT_VOTER, true, 0);
+
+	/* Re-enable autonomous mode */
+	if (chip->hvdcp_hw_inov_dis_votable)
+		vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
+}
+
+static int pl_disable_vote_callback(struct votable *votable,
+		void *data, int pl_disable, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	chip->taper_pct = 100;
+	chip->main_settled_ua = 0;
+	chip->pl_settled_ua = 0;
+
+	if (!pl_disable) { /* enable */
+		rc = power_supply_get_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+		if (rc == -ENODEV) {
+			/*
+			 * -ENODEV is returned only if parallel chip
+			 * is not present in the system.
+			 * Disable parallel charger forever.
+			 */
+			schedule_work(&chip->pl_disable_forever_work);
+			return rc;
+		}
+
+		rerun_election(chip->fv_votable);
+		rerun_election(chip->fcc_votable);
+		/*
+		 * Enable will be called with a valid pl_psy always. The
+		 * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
+		 * is seen.
+		 */
+		pval.intval = 0;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+		if (rc < 0)
+			pr_err("Couldn't change slave suspend state rc=%d\n",
+				rc);
+
+		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			split_settled(chip);
+		/*
+		 * we could have been enabled while in taper mode,
+		 *  start the taper work if so
+		 */
+		rc = power_supply_get_property(chip->batt_psy,
+				       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		} else {
+			if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+				pl_dbg(chip, PR_PARALLEL,
+					"pl enabled in Taper scheduing work\n");
+				schedule_delayed_work(&chip->pl_taper_work, 0);
+			}
+		}
+	} else {
+		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			split_settled(chip);
+
+		/* pl_psy may be NULL while in the disable branch */
+		if (chip->pl_psy) {
+			pval.intval = 1;
+			rc = power_supply_set_property(chip->pl_psy,
+					POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+			if (rc < 0)
+				pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+		}
+		rerun_election(chip->fcc_votable);
+		rerun_election(chip->fv_votable);
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
+		   pl_disable ? "disabled" : "enabled");
+
+	return 0;
+}
+
+static int pl_awake_vote_callback(struct votable *votable,
+			void *data, int awake, const char *client)
+{
+	struct pl_data *chip = data;
+
+	if (awake)
+		__pm_stay_awake(chip->pl_ws);
+	else
+		__pm_relax(chip->pl_ws);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
+static bool is_main_available(struct pl_data *chip)
+{
+	if (!chip->main_psy)
+		chip->main_psy = power_supply_get_by_name("main");
+
+	if (!chip->main_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_batt_available(struct pl_data *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
+static bool is_parallel_available(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (chip->pl_psy)
+		return true;
+
+	chip->pl_psy = power_supply_get_by_name("parallel");
+	if (!chip->pl_psy)
+		return false;
+
+	rc = power_supply_get_property(chip->pl_psy,
+			       POWER_SUPPLY_PROP_PARALLEL_MODE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get parallel mode from parallel rc=%d\n",
+				rc);
+		return false;
+	}
+	/*
+	 * Note that pl_mode will be updated to anything other than a _NONE
+	 * only after pl_psy is found. IOW pl_mode != _NONE implies that
+	 * pl_psy is present and valid.
+	 */
+	chip->pl_mode = pval.intval;
+
+	/* Disable autonomous voltage increments for USBIN-USBIN */
+	if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+		if (!chip->hvdcp_hw_inov_dis_votable)
+			chip->hvdcp_hw_inov_dis_votable =
+					find_votable("HVDCP_HW_INOV_DIS");
+		if (chip->hvdcp_hw_inov_dis_votable)
+			/* keep HVDCP INOV disabled while parallel charging */
+			vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER,
+					true, 0);
+		else
+			return false;
+	}
+
+	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
+
+	return true;
+}
+
+static void handle_main_charge_type(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		return;
+	}
+
+	/* any state other than fast or taper disables parallel charging */
+	if ((pval.intval != POWER_SUPPLY_CHARGE_TYPE_FAST)
+		&& (pval.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+		vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+		chip->taper_pct = 100;
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				false, 0);
+		chip->charge_type = pval.intval;
+		return;
+	}
+
+	/* handle taper charge entry */
+	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_FAST
+		&& (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+		chip->charge_type = pval.intval;
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work\n");
+		schedule_delayed_work(&chip->pl_taper_work, 0);
+		return;
+	}
+
+	/* handle fast/taper charge entry */
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
+			|| pval.intval == POWER_SUPPLY_CHARGE_TYPE_FAST) {
+		pl_dbg(chip, PR_PARALLEL, "chg_state enabling parallel\n");
+		vote(chip->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+		chip->charge_type = pval.intval;
+		return;
+	}
+
+	/* remember the new state only if it isn't any of the above */
+	chip->charge_type = pval.intval;
+}
+
+#define MIN_ICL_CHANGE_DELTA_UA		300000
+static void handle_settled_icl_change(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (get_effective_result(chip->pl_disable_votable))
+		return;
+
+	if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
+			|| chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
+		/*
+		 * call aicl split only when USBIN_USBIN and enabled
+		 * and if aicl changed
+		 */
+		rc = power_supply_get_property(chip->main_psy,
+				       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+				       &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+
+		/* If ICL change is small skip splitting */
+		if (abs((chip->main_settled_ua - chip->pl_settled_ua)
+				- pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
+			split_settled(chip);
+	} else {
+		rerun_election(chip->fcc_votable);
+	}
+}
+
+static void handle_parallel_in_taper(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (get_effective_result_locked(chip->pl_disable_votable))
+		return;
+
+	if (!chip->pl_psy)
+		return;
+
+	rc = power_supply_get_property(chip->pl_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get pl charge type rc=%d\n", rc);
+		return;
+	}
+
+	/*
+	 * if parallel is seen in taper mode ever, that is an anomaly and
+	 * we disable parallel charger
+	 */
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				true, 0);
+		return;
+	}
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work,
+			struct pl_data, status_change_work);
+
+	if (!is_main_available(chip))
+		return;
+
+	if (!is_batt_available(chip))
+		return;
+
+	is_parallel_available(chip);
+
+	handle_main_charge_type(chip);
+	handle_settled_icl_change(chip);
+	handle_parallel_in_taper(chip);
+}
+
+static int pl_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct pl_data *chip = container_of(nb, struct pl_data, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "parallel") == 0)
+	    || (strcmp(psy->desc->name, "battery") == 0)
+	    || (strcmp(psy->desc->name, "main") == 0))
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+static int pl_register_notifier(struct pl_data *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = pl_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int pl_determine_initial_status(struct pl_data *chip)
+{
+	status_change_work(&chip->status_change_work);
+	return 0;
+}
+
+#define DEFAULT_RESTRICTED_CURRENT_UA	1000000
+static int pl_init(void)
+{
+	struct pl_data *chip;
+	int rc = 0;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+	chip->slave_pct = 50;
+	chip->restricted_current = DEFAULT_RESTRICTED_CURRENT_UA;
+
+	chip->pl_ws = wakeup_source_register("qcom-battery");
+	if (!chip->pl_ws)
+		goto cleanup;
+
+	chip->fcc_votable = create_votable("FCC", VOTE_MIN,
+					pl_fcc_vote_callback,
+					chip);
+	if (IS_ERR(chip->fcc_votable)) {
+		rc = PTR_ERR(chip->fcc_votable);
+		goto release_wakeup_source;
+	}
+
+	chip->fv_votable = create_votable("FV", VOTE_MAX,
+					pl_fv_vote_callback,
+					chip);
+	if (IS_ERR(chip->fv_votable)) {
+		rc = PTR_ERR(chip->fv_votable);
+		goto destroy_votable;
+	}
+
+	chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
+					pl_disable_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_disable_votable)) {
+		rc = PTR_ERR(chip->pl_disable_votable);
+		goto destroy_votable;
+	}
+	vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+	vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, true, 0);
+
+	chip->pl_awake_votable = create_votable("PL_AWAKE", VOTE_SET_ANY,
+					pl_awake_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_awake_votable)) {
+		rc = PTR_ERR(chip->pl_awake_votable);
+		goto destroy_votable;
+	}
+
+	INIT_WORK(&chip->status_change_work, status_change_work);
+	INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
+	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
+
+	rc = pl_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	rc = pl_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	chip->qcom_batt_class.name = "qcom-battery";
+	chip->qcom_batt_class.owner = THIS_MODULE;
+	chip->qcom_batt_class.class_attrs = pl_attributes;
+
+	rc = class_register(&chip->qcom_batt_class);
+	if (rc < 0) {
+		pr_err("couldn't register pl_data sysfs class rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	the_chip = chip;
+
+	return rc;
+
+unreg_notifier:
+	power_supply_unreg_notifier(&chip->nb);
+destroy_votable:
+	destroy_votable(chip->pl_awake_votable);
+	destroy_votable(chip->pl_disable_votable);
+	destroy_votable(chip->fv_votable);
+	destroy_votable(chip->fcc_votable);
+release_wakeup_source:
+	wakeup_source_unregister(chip->pl_ws);
+cleanup:
+	kfree(chip);
+	return rc;
+}
+
+static void pl_deinit(void)
+{
+	struct pl_data *chip = the_chip;
+
+	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->pl_awake_votable);
+	destroy_votable(chip->pl_disable_votable);
+	destroy_votable(chip->fv_votable);
+	destroy_votable(chip->fcc_votable);
+	wakeup_source_unregister(chip->pl_ws);
+	kfree(chip);
+}
+
+module_init(pl_init);
+module_exit(pl_deinit);
+
+MODULE_DESCRIPTION("");
+MODULE_LICENSE("GPL v2");
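
The glue logic above is built on the pmic-voter votable abstraction: pl_init()
creates the "FCC", "FV", "PL_DISABLE" and "PL_AWAKE" votables, and every
policy input is expressed as a named vote rather than a direct register write.
The fragment below is a hedged sketch, not part of this patch, of how another
kernel client could cap charge current through the same mechanism; the
THERMAL_VOTER name and the 2 A value are illustrative, and the prototypes are
assumed to match the find_votable()/vote() calls used in battery.c.

	#include <linux/types.h>
	#include "pmic-voter.h"

	#define THERMAL_VOTER	"THERMAL_VOTER"

	static void limit_fcc_for_thermal(bool throttle)
	{
		struct votable *fcc_votable = find_votable("FCC");

		if (!fcc_votable)
			return;

		/*
		 * "FCC" is a VOTE_MIN votable, so the lowest enabled vote
		 * wins: this caps fast charge current at 2 A while the
		 * throttle vote is enabled and lifts the cap otherwise.
		 */
		vote(fcc_votable, THERMAL_VOTER, throttle, 2000000);
	}
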
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
new file mode 100644
index 0000000..c0ba5a9
--- /dev/null
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -0,0 +1,442 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __FG_CORE_H__
+#define __FG_CORE_H__
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include "pmic-voter.h"
+
+#define fg_dbg(chip, reason, fmt, ...)			\
+	do {							\
+		if (*chip->debug_mask & (reason))		\
+			pr_info(fmt, ##__VA_ARGS__);	\
+		else						\
+			pr_debug(fmt, ##__VA_ARGS__);	\
+	} while (0)
+
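+/* true when value lies between left and right (inclusive), in either order */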
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
+
+/* Awake votable reasons */
+#define SRAM_READ	"fg_sram_read"
+#define SRAM_WRITE	"fg_sram_write"
+#define PROFILE_LOAD	"fg_profile_load"
+#define DELTA_SOC	"fg_delta_soc"
+
+#define DEBUG_PRINT_BUFFER_SIZE		64
+/* 3 byte address + 1 space character */
+#define ADDR_LEN			4
+/* Format is 'XX ' */
+#define CHARS_PER_ITEM			3
+/* 4 data items per line */
+#define ITEMS_PER_LINE			4
+#define MAX_LINE_LENGTH			(ADDR_LEN + (ITEMS_PER_LINE *	\
+					CHARS_PER_ITEM) + 1)
+
+#define FG_SRAM_ADDRESS_MAX		255
+#define FG_SRAM_LEN			504
+#define PROFILE_LEN			224
+#define PROFILE_COMP_LEN		148
+#define BUCKET_COUNT			8
+#define BUCKET_SOC_PCT			(256 / BUCKET_COUNT)
+
+#define KI_COEFF_MAX			62200
+#define KI_COEFF_SOC_LEVELS		3
+
+#define SLOPE_LIMIT_COEFF_MAX		31
+
+#define BATT_THERM_NUM_COEFFS		3
+
+/* Debug flag definitions */
+enum fg_debug_flag {
+	FG_IRQ			= BIT(0), /* Show interrupts */
+	FG_STATUS		= BIT(1), /* Show FG status changes */
+	FG_POWER_SUPPLY		= BIT(2), /* Show POWER_SUPPLY */
+	FG_SRAM_WRITE		= BIT(3), /* Show SRAM writes */
+	FG_SRAM_READ		= BIT(4), /* Show SRAM reads */
+	FG_BUS_WRITE		= BIT(5), /* Show REGMAP writes */
+	FG_BUS_READ		= BIT(6), /* Show REGMAP reads */
+	FG_CAP_LEARN		= BIT(7), /* Show capacity learning */
+	FG_TTF			= BIT(8), /* Show time to full */
+};
+
+/* SRAM access */
+enum sram_access_flags {
+	FG_IMA_DEFAULT	= 0,
+	FG_IMA_ATOMIC	= BIT(0),
+	FG_IMA_NO_WLOCK	= BIT(1),
+};
+
+/* JEITA */
+enum {
+	JEITA_COLD = 0,
+	JEITA_COOL,
+	JEITA_WARM,
+	JEITA_HOT,
+	NUM_JEITA_LEVELS,
+};
+
+/* FG irqs */
+enum fg_irq_index {
+	MSOC_FULL_IRQ = 0,
+	MSOC_HIGH_IRQ,
+	MSOC_EMPTY_IRQ,
+	MSOC_LOW_IRQ,
+	MSOC_DELTA_IRQ,
+	BSOC_DELTA_IRQ,
+	SOC_READY_IRQ,
+	SOC_UPDATE_IRQ,
+	BATT_TEMP_DELTA_IRQ,
+	BATT_MISSING_IRQ,
+	ESR_DELTA_IRQ,
+	VBATT_LOW_IRQ,
+	VBATT_PRED_DELTA_IRQ,
+	DMA_GRANT_IRQ,
+	MEM_XCP_IRQ,
+	IMA_RDY_IRQ,
+	FG_IRQ_MAX,
+};
+
+/* WA flags */
+enum {
+	DELTA_SOC_IRQ_WA = BIT(0),
+};
+
+/*
+ * List of FG_SRAM parameters. Please add a parameter only if it is an entry
+ * that will be used either to configure an entity (e.g. termination current)
+ * which might need some encoding (or) it is an entry that will be read from
+ * SRAM and decoded (e.g. CC_SOC_SW) for SW to use at various places. For
+ * generic read/writes to SRAM registers, please use fg_sram_read/write APIs
+ * directly without adding an entry here.
+ */
+enum fg_sram_param_id {
+	FG_SRAM_BATT_SOC = 0,
+	FG_SRAM_FULL_SOC,
+	FG_SRAM_VOLTAGE_PRED,
+	FG_SRAM_OCV,
+	FG_SRAM_ESR,
+	FG_SRAM_RSLOW,
+	FG_SRAM_ALG_FLAGS,
+	FG_SRAM_CC_SOC,
+	FG_SRAM_CC_SOC_SW,
+	FG_SRAM_ACT_BATT_CAP,
+	/* Entries below here are configurable during initialization */
+	FG_SRAM_CUTOFF_VOLT,
+	FG_SRAM_EMPTY_VOLT,
+	FG_SRAM_VBATT_LOW,
+	FG_SRAM_FLOAT_VOLT,
+	FG_SRAM_VBATT_FULL,
+	FG_SRAM_ESR_TIMER_DISCHG_MAX,
+	FG_SRAM_ESR_TIMER_DISCHG_INIT,
+	FG_SRAM_ESR_TIMER_CHG_MAX,
+	FG_SRAM_ESR_TIMER_CHG_INIT,
+	FG_SRAM_SYS_TERM_CURR,
+	FG_SRAM_CHG_TERM_CURR,
+	FG_SRAM_DELTA_MSOC_THR,
+	FG_SRAM_DELTA_BSOC_THR,
+	FG_SRAM_RECHARGE_SOC_THR,
+	FG_SRAM_RECHARGE_VBATT_THR,
+	FG_SRAM_KI_COEFF_MED_DISCHG,
+	FG_SRAM_KI_COEFF_HI_DISCHG,
+	FG_SRAM_ESR_TIGHT_FILTER,
+	FG_SRAM_ESR_BROAD_FILTER,
+	FG_SRAM_SLOPE_LIMIT,
+	FG_SRAM_MAX,
+};
+
+struct fg_sram_param {
+	u16 addr_word;
+	int addr_byte;
+	u8  len;
+	int value;
+	int numrtr;
+	int denmtr;
+	int offset;
+	void (*encode)(struct fg_sram_param *sp, enum fg_sram_param_id id,
+		int val, u8 *buf);
+	int (*decode)(struct fg_sram_param *sp, enum fg_sram_param_id id,
+		int val);
+};
+
+enum fg_alg_flag_id {
+	ALG_FLAG_SOC_LT_OTG_MIN = 0,
+	ALG_FLAG_SOC_LT_RECHARGE,
+	ALG_FLAG_IBATT_LT_ITERM,
+	ALG_FLAG_IBATT_GT_HPM,
+	ALG_FLAG_IBATT_GT_UPM,
+	ALG_FLAG_VBATT_LT_RECHARGE,
+	ALG_FLAG_VBATT_GT_VFLOAT,
+	ALG_FLAG_MAX,
+};
+
+struct fg_alg_flag {
+	char	*name;
+	u8	bit;
+	bool	invalid;
+};
+
+enum wa_flags {
+	PMI8998_V1_REV_WA = BIT(0),
+};
+
+enum slope_limit_status {
+	LOW_TEMP_DISCHARGE = 0,
+	LOW_TEMP_CHARGE,
+	HIGH_TEMP_DISCHARGE,
+	HIGH_TEMP_CHARGE,
+	SLOPE_LIMIT_NUM_COEFFS,
+};
+
+/* DT parameters for FG device */
+struct fg_dt_props {
+	bool	force_load_profile;
+	bool	hold_soc_while_full;
+	bool	auto_recharge_soc;
+	int	cutoff_volt_mv;
+	int	empty_volt_mv;
+	int	vbatt_low_thr_mv;
+	int	chg_term_curr_ma;
+	int	sys_term_curr_ma;
+	int	delta_soc_thr;
+	int	recharge_soc_thr;
+	int	recharge_volt_thr_mv;
+	int	rsense_sel;
+	int	esr_timer_charging;
+	int	esr_timer_awake;
+	int	esr_timer_asleep;
+	int	rconn_mohms;
+	int	esr_clamp_mohms;
+	int	cl_start_soc;
+	int	cl_max_temp;
+	int	cl_min_temp;
+	int	cl_max_cap_inc;
+	int	cl_max_cap_dec;
+	int	cl_max_cap_limit;
+	int	cl_min_cap_limit;
+	int	jeita_hyst_temp;
+	int	batt_temp_delta;
+	int	esr_flt_switch_temp;
+	int	esr_tight_flt_upct;
+	int	esr_broad_flt_upct;
+	int	esr_tight_lt_flt_upct;
+	int	esr_broad_lt_flt_upct;
+	int	slope_limit_temp;
+	int	jeita_thresholds[NUM_JEITA_LEVELS];
+	int	ki_coeff_soc[KI_COEFF_SOC_LEVELS];
+	int	ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
+	int	ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS];
+	int	slope_limit_coeffs[SLOPE_LIMIT_NUM_COEFFS];
+	u8	batt_therm_coeffs[BATT_THERM_NUM_COEFFS];
+};
+
+/* parameters from battery profile */
+struct fg_batt_props {
+	const char	*batt_type_str;
+	char		*batt_profile;
+	int		float_volt_uv;
+	int		vbatt_full_mv;
+	int		fastchg_curr_ma;
+};
+
+struct fg_cyc_ctr_data {
+	bool		en;
+	bool		started[BUCKET_COUNT];
+	u16		count[BUCKET_COUNT];
+	u8		last_soc[BUCKET_COUNT];
+	int		id;
+	struct mutex	lock;
+};
+
+struct fg_cap_learning {
+	bool		active;
+	int		init_cc_soc_sw;
+	int64_t		nom_cap_uah;
+	int64_t		init_cc_uah;
+	int64_t		final_cc_uah;
+	int64_t		learned_cc_uah;
+	struct mutex	lock;
+};
+
+struct fg_irq_info {
+	const char		*name;
+	const irq_handler_t	handler;
+	bool			wakeable;
+	int			irq;
+};
+
+struct fg_circ_buf {
+	int	arr[20];
+	int	size;
+	int	head;
+};
+
+struct fg_pt {
+	s32 x;
+	s32 y;
+};
+
+static const struct fg_pt fg_ln_table[] = {
+	{ 1000,		0 },
+	{ 2000,		693 },
+	{ 4000,		1386 },
+	{ 6000,		1792 },
+	{ 8000,		2079 },
+	{ 16000,	2773 },
+	{ 32000,	3466 },
+	{ 64000,	4159 },
+	{ 128000,	4852 },
+};
+
+struct fg_chip {
+	struct device		*dev;
+	struct pmic_revid_data	*pmic_rev_id;
+	struct regmap		*regmap;
+	struct dentry		*dfs_root;
+	struct power_supply	*fg_psy;
+	struct power_supply	*batt_psy;
+	struct power_supply	*usb_psy;
+	struct power_supply	*dc_psy;
+	struct power_supply	*parallel_psy;
+	struct iio_channel	*batt_id_chan;
+	struct fg_memif		*sram;
+	struct fg_irq_info	*irqs;
+	struct votable		*awake_votable;
+	struct fg_sram_param	*sp;
+	struct fg_alg_flag	*alg_flags;
+	int			*debug_mask;
+	char			batt_profile[PROFILE_LEN];
+	struct fg_dt_props	dt;
+	struct fg_batt_props	bp;
+	struct fg_cyc_ctr_data	cyc_ctr;
+	struct notifier_block	nb;
+	struct fg_cap_learning  cl;
+	struct mutex		bus_lock;
+	struct mutex		sram_rw_lock;
+	struct mutex		batt_avg_lock;
+	struct mutex		charge_full_lock;
+	u32			batt_soc_base;
+	u32			batt_info_base;
+	u32			mem_if_base;
+	u32			rradc_base;
+	u32			wa_flags;
+	int			batt_id_ohms;
+	int			charge_status;
+	int			prev_charge_status;
+	int			charge_done;
+	int			charge_type;
+	int			last_soc;
+	int			last_batt_temp;
+	int			health;
+	int			maint_soc;
+	int			delta_soc;
+	int			last_msoc;
+	enum slope_limit_status	slope_limit_sts;
+	bool			profile_available;
+	bool			profile_loaded;
+	bool			battery_missing;
+	bool			fg_restarting;
+	bool			charge_full;
+	bool			recharge_soc_adjusted;
+	bool			ki_coeff_dischg_en;
+	bool			esr_fcc_ctrl_en;
+	bool			soc_reporting_ready;
+	bool			esr_flt_cold_temp_en;
+	bool			bsoc_delta_irq_en;
+	bool			slope_limit_en;
+	struct completion	soc_update;
+	struct completion	soc_ready;
+	struct delayed_work	profile_load_work;
+	struct work_struct	status_change_work;
+	struct work_struct	cycle_count_work;
+	struct delayed_work	batt_avg_work;
+	struct delayed_work	sram_dump_work;
+	struct fg_circ_buf	ibatt_circ_buf;
+	struct fg_circ_buf	vbatt_circ_buf;
+};
+
+/* Debugfs data structures are below */
+
+/* Log buffer */
+struct fg_log_buffer {
+	size_t		rpos;
+	size_t		wpos;
+	size_t		len;
+	char		data[0];
+};
+
+/* transaction parameters */
+struct fg_trans {
+	struct fg_chip		*chip;
+	struct mutex		fg_dfs_lock; /* Prevent thread concurrency */
+	struct fg_log_buffer	*log;
+	u32			cnt;
+	u16			addr;
+	u32			offset;
+	u8			*data;
+};
+
+struct fg_dbgfs {
+	struct debugfs_blob_wrapper	help_msg;
+	struct fg_chip			*chip;
+	struct dentry			*root;
+	u32				cnt;
+	u32				addr;
+};
+
+extern int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags);
+extern int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags);
+extern int fg_sram_masked_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 mask, u8 val, int flags);
+extern int fg_interleaved_mem_read(struct fg_chip *chip, u16 address,
+			u8 offset, u8 *val, int len);
+extern int fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
+			u8 offset, u8 *val, int len, bool atomic_access);
+extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
+extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
+extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
+extern int fg_ima_init(struct fg_chip *chip);
+extern int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts);
+extern int fg_clear_dma_errors_if_any(struct fg_chip *chip);
+extern int fg_debugfs_create(struct fg_chip *chip);
+extern void fill_string(char *str, size_t str_len, u8 *buf, int buf_len);
+extern void dump_sram(u8 *buf, int addr, int len);
+extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos);
+extern s64 fg_float_decode(u16 val);
+extern bool is_input_present(struct fg_chip *chip);
+extern void fg_circ_buf_add(struct fg_circ_buf *buf, int val);
+extern void fg_circ_buf_clr(struct fg_circ_buf *buf);
+extern int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg);
+extern int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input,
+			s32 *output);
+#endif
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
new file mode 100644
index 0000000..2dc7618
--- /dev/null
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -0,0 +1,740 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"FG: %s: " fmt, __func__
+
+#include "fg-core.h"
+#include "fg-reg.h"
+
+/* Generic definitions */
+#define RETRY_COUNT		3
+#define BYTES_PER_SRAM_WORD	4
+
+enum {
+	FG_READ = 0,
+	FG_WRITE,
+};
+
+static int fg_set_address(struct fg_chip *chip, u16 address)
+{
+	u8 buffer[2];
+	int rc;
+
+	buffer[0] = address & 0xFF;
+	/* MSB has to be written zero */
+	buffer[1] = 0;
+
+	rc = fg_write(chip, MEM_IF_ADDR_LSB(chip), buffer, 2);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04X, rc=%d\n",
+			MEM_IF_ADDR_LSB(chip), rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int fg_config_access_mode(struct fg_chip *chip, bool access, bool burst)
+{
+	int rc;
+	u8 intf_ctl = 0;
+
+	intf_ctl = ((access == FG_WRITE) ? IMA_WR_EN_BIT : 0) |
+			(burst ? MEM_ACS_BURST_BIT : 0);
+
+	rc = fg_masked_write(chip, MEM_IF_IMA_CTL(chip), IMA_CTL_MASK,
+			intf_ctl);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_IMA_CTL(chip), rc);
+		return -EIO;
+	}
+
+	return rc;
+}
+
+static int fg_run_iacs_clear_sequence(struct fg_chip *chip)
+{
+	u8 val, hw_sts, exp_sts;
+	int rc, tries = 250;
+
+	/*
+	 * The values written while running the IACS clear sequence come
+	 * from the hardware documentation.
+	 */
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip),
+			IACS_CLR_BIT | STATIC_CLK_EN_BIT,
+			IACS_CLR_BIT | STATIC_CLK_EN_BIT);
+	if (rc < 0) {
+		pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_IMA_CFG(chip),
+			rc);
+		return rc;
+	}
+
+	rc = fg_config_access_mode(chip, FG_READ, false);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_IMA_CTL(chip), rc);
+		return rc;
+	}
+
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT,
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to set ima_req_access bit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Delay for the clock to reach FG */
+	usleep_range(35, 40);
+
+	while (1) {
+		val = 0;
+		rc = fg_write(chip, MEM_IF_ADDR_MSB(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to write 0x%04x, rc=%d\n",
+				MEM_IF_ADDR_MSB(chip), rc);
+			return rc;
+		}
+
+		val = 0;
+		rc = fg_write(chip, MEM_IF_WR_DATA3(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to write 0x%04x, rc=%d\n",
+				MEM_IF_WR_DATA3(chip), rc);
+			return rc;
+		}
+
+		rc = fg_read(chip, MEM_IF_RD_DATA3(chip), &val, 1);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_RD_DATA3(chip), rc);
+			return rc;
+		}
+
+		/* Delay for IMA hardware to clear */
+		usleep_range(35, 40);
+
+		rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read ima_hw_sts rc=%d\n", rc);
+			return rc;
+		}
+
+		if (hw_sts != 0)
+			continue;
+
+		rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read ima_exp_sts rc=%d\n", rc);
+			return rc;
+		}
+
+		if (exp_sts == 0 || !(--tries))
+			break;
+	}
+
+	if (!tries)
+		pr_err("failed to clear IMA error, hw_sts: %x exp_sts: %x\n",
+			hw_sts, exp_sts);
+
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip), IACS_CLR_BIT, 0);
+	if (rc < 0) {
+		pr_err("failed to write 0x%04x, rc=%d\n", MEM_IF_IMA_CFG(chip),
+			rc);
+		return rc;
+	}
+
+	udelay(5);
+
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0) {
+		pr_err("failed to write to 0x%04x, rc=%d\n",
+			MEM_IF_MEM_INTF_CFG(chip), rc);
+		return rc;
+	}
+
+	/* Delay before next transaction is attempted */
+	usleep_range(35, 40);
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "IACS clear sequence complete\n");
+	return rc;
+}
+
+int fg_clear_dma_errors_if_any(struct fg_chip *chip)
+{
+	int rc;
+	u8 dma_sts;
+
+	rc = fg_read(chip, MEM_IF_DMA_STS(chip), &dma_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			MEM_IF_DMA_STS(chip), rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "dma_sts: %x\n", dma_sts);
+
+	if (dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT)) {
+		rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip),
+				DMA_CLEAR_LOG_BIT, DMA_CLEAR_LOG_BIT);
+		if (rc < 0) {
+			pr_err("failed to write addr=0x%04x, rc=%d\n",
+				MEM_IF_DMA_CTL(chip), rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts)
+{
+	int rc = 0;
+	u8 err_sts, exp_sts = 0, hw_sts = 0;
+	bool run_err_clr_seq = false;
+
+	rc = fg_read(chip, MEM_IF_IMA_EXP_STS(chip), &exp_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_exp_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_read(chip, MEM_IF_IMA_HW_STS(chip), &hw_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_hw_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_read(chip, MEM_IF_IMA_ERR_STS(chip), &err_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read ima_err_sts rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+		err_sts, exp_sts, hw_sts);
+
+	if (check_hw_sts) {
+		/*
+		 * The lower nibble should equal the upper nibble before SRAM
+		 * transactions begin from the SW side. If they are unequal,
+		 * the error clear sequence should be run irrespective of IMA
+		 * exception errors.
+		 */
+		if ((hw_sts & 0x0F) != hw_sts >> 4) {
+			pr_err("IMA HW not in correct state, hw_sts=%x\n",
+				hw_sts);
+			run_err_clr_seq = true;
+		}
+	}
+
+	if (exp_sts & (IACS_ERR_BIT | XCT_TYPE_ERR_BIT | DATA_RD_ERR_BIT |
+		DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_STABLE_ERR_BIT)) {
+		pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts);
+		run_err_clr_seq = true;
+	}
+
+	if (run_err_clr_seq) {
+		/* clear the error */
+		rc = fg_run_iacs_clear_sequence(chip);
+		if (rc < 0) {
+			pr_err("failed to run iacs clear sequence rc=%d\n", rc);
+			return rc;
+		}
+
+		/* Retry again as there was an error in the transaction */
+		return -EAGAIN;
+	}
+
+	return rc;
+}
+
+static int fg_check_iacs_ready(struct fg_chip *chip)
+{
+	int rc = 0, tries = 250;
+	u8 ima_opr_sts = 0;
+
+	/*
+	 * Additional delay to make sure IACS ready bit is set after
+	 * Read/Write operation.
+	 */
+
+	usleep_range(30, 35);
+	while (1) {
+		rc = fg_read(chip, MEM_IF_IMA_OPR_STS(chip), &ima_opr_sts, 1);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_IMA_OPR_STS(chip), rc);
+			return rc;
+		}
+
+		if (ima_opr_sts & IACS_RDY_BIT)
+			break;
+
+		if (!(--tries))
+			break;
+
+		/* delay for iacs_ready to be asserted */
+		usleep_range(5000, 7000);
+	}
+
+	if (!tries) {
+		pr_err("IACS_RDY not set, opr_sts: %d\n", ima_opr_sts);
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			return rc;
+		}
+
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int __fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
+				int offset, u8 *val, int len)
+{
+	int rc = 0, i;
+	u8 *ptr = val, byte_enable = 0, num_bytes = 0;
+
+	fg_dbg(chip, FG_SRAM_WRITE, "length %d addr=%02X offset=%d\n", len,
+		address, offset);
+
+	while (len > 0) {
+		num_bytes = (offset + len) > BYTES_PER_SRAM_WORD ?
+				(BYTES_PER_SRAM_WORD - offset) : len;
+
+		/* write to byte_enable */
+		for (i = offset; i < (offset + num_bytes); i++)
+			byte_enable |= BIT(i);
+
+		rc = fg_write(chip, MEM_IF_IMA_BYTE_EN(chip), &byte_enable, 1);
+		if (rc < 0) {
+			pr_err("Unable to write to byte_en_reg rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* write data */
+		rc = fg_write(chip, MEM_IF_WR_DATA0(chip) + offset, ptr,
+				num_bytes);
+		if (rc < 0) {
+			pr_err("failed to write to 0x%04x, rc=%d\n",
+				MEM_IF_WR_DATA0(chip) + offset, rc);
+			return rc;
+		}
+
+		/*
+		 * A write to the last byte, WR_DATA3, starts the write
+		 * transaction. Write a dummy value to WR_DATA3 if it does
+		 * not hold valid data; the dummy data is not committed to
+		 * SRAM since byte_en for WR_DATA3 is not set.
+		 */
+		if (!(byte_enable & BIT(3))) {
+			u8 dummy_byte = 0x0;
+
+			rc = fg_write(chip, MEM_IF_WR_DATA3(chip), &dummy_byte,
+					1);
+			if (rc < 0) {
+				pr_err("failed to write dummy-data to WR_DATA3 rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			return rc;
+		}
+
+		ptr += num_bytes;
+		len -= num_bytes;
+		offset = byte_enable = 0;
+
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
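The loop above splits a transfer into 4-byte SRAM words and builds a per-word byte_enable mask so that a partial word commits only the intended bytes. A minimal user-space sketch of just that chunking and mask arithmetic (illustrative only, not part of this patch; the names are made up):

#include <stdio.h>

#define BYTES_PER_SRAM_WORD	4

/* Show how a write of 'len' bytes starting at byte 'offset' within the
 * first SRAM word is split into per-word chunks, each with its own
 * byte-enable mask, mirroring the loop in __fg_interleaved_mem_write().
 */
static void show_chunks(int offset, int len)
{
	while (len > 0) {
		int num_bytes = (offset + len) > BYTES_PER_SRAM_WORD ?
				(BYTES_PER_SRAM_WORD - offset) : len;
		unsigned int byte_enable = 0;
		int i;

		for (i = offset; i < offset + num_bytes; i++)
			byte_enable |= 1u << i;

		printf("chunk of %d byte(s), byte_enable=0x%x\n",
		       num_bytes, byte_enable);

		len -= num_bytes;
		offset = 0;	/* later words always start at byte 0 */
	}
}

int main(void)
{
	/* 6 bytes starting at byte 2: word 0 takes 2 bytes (mask 0xc),
	 * word 1 takes the remaining 4 bytes (mask 0xf).
	 */
	show_chunks(2, 6);
	return 0;
}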
+
+static int __fg_interleaved_mem_read(struct fg_chip *chip, u16 address,
+				int offset, u8 *val, int len)
+{
+	int rc = 0, total_len;
+	u8 *rd_data = val, num_bytes;
+	char str[DEBUG_PRINT_BUFFER_SIZE];
+
+	fg_dbg(chip, FG_SRAM_READ, "length %d addr=%02X\n", len, address);
+
+	total_len = len;
+	while (len > 0) {
+		num_bytes = (offset + len) > BYTES_PER_SRAM_WORD ?
+				(BYTES_PER_SRAM_WORD - offset) : len;
+		rc = fg_read(chip, MEM_IF_RD_DATA0(chip) + offset, rd_data,
+				num_bytes);
+		if (rc < 0) {
+			pr_err("failed to read 0x%04x, rc=%d\n",
+				MEM_IF_RD_DATA0(chip) + offset, rc);
+			return rc;
+		}
+
+		rd_data += num_bytes;
+		len -= num_bytes;
+		offset = 0;
+
+		/* check for error condition */
+		rc = fg_clear_ima_errors_if_any(chip, false);
+		if (rc < 0) {
+			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			return rc;
+		}
+
+		if (len && len < BYTES_PER_SRAM_WORD) {
+			/*
+			 * Move to single mode. No address change is needed
+			 * here since the access must already be in burst
+			 * mode; the FG HW increments the address internally
+			 * once the MSB of RD_DATA is read.
+			 */
+			rc = fg_config_access_mode(chip, FG_READ, 0);
+			if (rc < 0) {
+				pr_err("failed to move to single mode rc=%d\n",
+					rc);
+				return -EIO;
+			}
+		}
+
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (*chip->debug_mask & FG_SRAM_READ) {
+		fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+		pr_info("data read: %s\n", str);
+	}
+
+	return rc;
+}
+
+static int fg_get_mem_access_status(struct fg_chip *chip, bool *status)
+{
+	int rc;
+	u8 mem_if_sts;
+
+	rc = fg_read(chip, MEM_IF_MEM_INTF_CFG(chip), &mem_if_sts, 1);
+	if (rc < 0) {
+		pr_err("failed to read rif_mem status rc=%d\n", rc);
+		return rc;
+	}
+
+	*status = mem_if_sts & MEM_ACCESS_REQ_BIT;
+	return 0;
+}
+
+static bool is_mem_access_available(struct fg_chip *chip, int access)
+{
+	bool rif_mem_sts = true;
+	int rc, time_count = 0;
+
+	while (1) {
+		rc = fg_get_mem_access_status(chip, &rif_mem_sts);
+		if (rc < 0)
+			return false;
+
+		/* Inverted logic: the bit being clear means access is available */
+		if (!rif_mem_sts)
+			break;
+
+		fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "MEM_ACCESS_REQ is not clear yet for IMA_%s\n",
+			access ? "write" : "read");
+
+		/*
+		 * Try this no more than 4 times. If MEM_ACCESS_REQ is not
+		 * clear, then return an error instead of waiting for it again.
+		 */
+		if (time_count > 4) {
+			pr_err("Tried 4 times(~16ms) polling MEM_ACCESS_REQ\n");
+			return false;
+		}
+
+		/* Wait for 4ms before reading MEM_ACCESS_REQ again */
+		usleep_range(4000, 4100);
+		time_count++;
+	}
+	return true;
+}
+
+static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
+		u16 address, int offset, int len, bool access)
+{
+	int rc = 0;
+
+	if (!is_mem_access_available(chip, access))
+		return -EBUSY;
+
+	/* configure for IMA access */
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT,
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to set ima_req_access bit rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure for the read/write, single/burst mode */
+	rc = fg_config_access_mode(chip, access, (offset + len) > 4);
+	if (rc < 0) {
+		pr_err("failed to set memory access rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = fg_check_iacs_ready(chip);
+	if (rc < 0) {
+		pr_err_ratelimited("IACS_RDY failed rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_set_address(chip, address);
+	if (rc < 0) {
+		pr_err("failed to set address rc = %d\n", rc);
+		return rc;
+	}
+
+	if (access == FG_READ) {
+		rc = fg_check_iacs_ready(chip);
+		if (rc < 0) {
+			pr_debug("IACS_RDY failed rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int fg_get_beat_count(struct fg_chip *chip, u8 *count)
+{
+	int rc;
+
+	rc = fg_read(chip, MEM_IF_FG_BEAT_COUNT(chip), count, 1);
+	*count &= BEAT_COUNT_MASK;
+	return rc;
+}
+
+int fg_interleaved_mem_read(struct fg_chip *chip, u16 address, u8 offset,
+				u8 *val, int len)
+{
+	int rc = 0, ret;
+	u8 start_beat_count, end_beat_count, count = 0;
+	bool retry = false;
+
+	if (offset > 3) {
+		pr_err("offset too large %d\n", offset);
+		return -EINVAL;
+	}
+
+retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Tried %d times\n", RETRY_COUNT);
+		retry = false;
+		goto out;
+	}
+
+	rc = fg_interleaved_mem_config(chip, val, address, offset, len,
+					FG_READ);
+	if (rc < 0) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read the start beat count */
+	rc = fg_get_beat_count(chip, &start_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read data */
+	rc = __fg_interleaved_mem_read(chip, address, offset, val, len);
+	if (rc < 0) {
+		count++;
+		if (rc == -EAGAIN) {
+			pr_err("IMA access failed retry_count = %d\n", count);
+			goto retry;
+		}
+		pr_err("failed to read SRAM address rc = %d\n", rc);
+		retry = true;
+		goto out;
+	}
+
+	/* read the end beat count */
+	rc = fg_get_beat_count(chip, &end_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	fg_dbg(chip, FG_SRAM_READ, "Start beat_count = %x End beat_count = %x\n",
+		start_beat_count, end_beat_count);
+
+	if (start_beat_count != end_beat_count) {
+		fg_dbg(chip, FG_SRAM_READ, "Beat counts (%d/%d) do not match - retrying transaction\n",
+			start_beat_count, end_beat_count);
+		count++;
+		retry = true;
+	}
+out:
+	/* Release IMA access */
+	ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0 && ret < 0) {
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
+		return ret;
+	}
+
+	if (retry) {
+		retry = false;
+		goto retry;
+	}
+
+	return rc;
+}
+
+int fg_interleaved_mem_write(struct fg_chip *chip, u16 address, u8 offset,
+				u8 *val, int len, bool atomic_access)
+{
+	int rc = 0, ret;
+	u8 start_beat_count, end_beat_count, count = 0;
+	bool retry = false;
+
+	if (offset > 3) {
+		pr_err("offset too large %d\n", offset);
+		return -EINVAL;
+	}
+
+retry:
+	if (count >= RETRY_COUNT) {
+		pr_err("Tried %d times\n", RETRY_COUNT);
+		retry = false;
+		goto out;
+	}
+
+	rc = fg_interleaved_mem_config(chip, val, address, offset, len,
+					FG_WRITE);
+	if (rc < 0) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* read the start beat count */
+	rc = fg_get_beat_count(chip, &start_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	/* write data */
+	rc = __fg_interleaved_mem_write(chip, address, offset, val, len);
+	if (rc < 0) {
+		count++;
+		if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+			pr_err("IMA access failed retry_count = %d\n", count);
+			goto retry;
+		}
+		pr_err("failed to write SRAM address rc = %d\n", rc);
+		retry = true;
+		goto out;
+	}
+
+	/* read the end beat count */
+	rc = fg_get_beat_count(chip, &end_beat_count);
+	if (rc < 0) {
+		pr_err("failed to read beat count rc=%d\n", rc);
+		count++;
+		retry = true;
+		goto out;
+	}
+
+	if (atomic_access && start_beat_count != end_beat_count)
+		pr_err("Start beat_count = %x End beat_count = %x\n",
+			start_beat_count, end_beat_count);
+out:
+	/* Release IMA access */
+	ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+				MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
+	if (rc < 0 && ret < 0) {
+		pr_err("failed to reset IMA access bit ret = %d\n", ret);
+		return ret;
+	}
+
+	if (retry) {
+		retry = false;
+		goto retry;
+	}
+
+	/* Return the error we got before releasing memory access */
+	return rc;
+}
+
+int fg_ima_init(struct fg_chip *chip)
+{
+	int rc;
+
+	/*
+	 * Change the FG_MEM_INT interrupt to track IACS_READY
+	 * condition instead of end-of-transaction. This makes sure
+	 * that the next transaction starts only after the hw is ready.
+	 */
+	rc = fg_masked_write(chip, MEM_IF_IMA_CFG(chip), IACS_INTR_SRC_SLCT_BIT,
+				IACS_INTR_SRC_SLCT_BIT);
+	if (rc < 0) {
+		pr_err("failed to configure interrupt source %d\n", rc);
+		return rc;
+	}
+
+	/* Clear DMA errors if any before clearing IMA errors */
+	rc = fg_clear_dma_errors_if_any(chip);
+	if (rc < 0) {
+		pr_err("Error in checking DMA errors rc:%d\n", rc);
+		return rc;
+	}
+
+	/* Clear IMA errors if any before SRAM transactions can begin */
+	rc = fg_clear_ima_errors_if_any(chip, true);
+	if (rc < 0 && rc != -EAGAIN) {
+		pr_err("Error in checking IMA errors rc:%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h
new file mode 100644
index 0000000..bf2827f
--- /dev/null
+++ b/drivers/power/supply/qcom/fg-reg.h
@@ -0,0 +1,328 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __FG_REG_H__
+#define __FG_REG_H__
+
+/* FG_ADC_RR register definitions used only for READ */
+#define ADC_RR_FAKE_BATT_LOW_LSB(chip)		(chip->rradc_base + 0x58)
+#define ADC_RR_FAKE_BATT_HIGH_LSB(chip)		(chip->rradc_base + 0x5A)
+
+/* FG_BATT_SOC register definitions */
+#define BATT_SOC_FG_ALG_STS(chip)		(chip->batt_soc_base + 0x06)
+#define BATT_SOC_FG_ALG_AUX_STS0(chip)		(chip->batt_soc_base + 0x07)
+#define BATT_SOC_SLEEP_SHUTDOWN_STS(chip)	(chip->batt_soc_base + 0x08)
+#define BATT_SOC_FG_MONOTONIC_SOC(chip)		(chip->batt_soc_base + 0x09)
+#define BATT_SOC_FG_MONOTONIC_SOC_CP(chip)	(chip->batt_soc_base + 0x0A)
+#define BATT_SOC_INT_RT_STS(chip)		(chip->batt_soc_base + 0x10)
+#define BATT_SOC_EN_CTL(chip)			(chip->batt_soc_base + 0x46)
+#define BATT_SOC_RESTART(chip)			(chip->batt_soc_base + 0x48)
+#define BATT_SOC_STS_CLR(chip)			(chip->batt_soc_base + 0x4A)
+#define BATT_SOC_LOW_PWR_CFG(chip)		(chip->batt_soc_base + 0x52)
+#define BATT_SOC_LOW_PWR_STS(chip)		(chip->batt_soc_base + 0x56)
+
+/* BATT_SOC_INT_RT_STS */
+#define MSOC_EMPTY_BIT				BIT(5)
+
+/* BATT_SOC_EN_CTL */
+#define FG_ALGORITHM_EN_BIT			BIT(7)
+
+/* BATT_SOC_RESTART */
+#define RESTART_GO_BIT				BIT(0)
+
+/* FG_BATT_INFO register definitions */
+#define BATT_INFO_BATT_TEMP_STS(chip)		(chip->batt_info_base + 0x06)
+#define BATT_INFO_SYS_BATT(chip)		(chip->batt_info_base + 0x07)
+#define BATT_INFO_FG_STS(chip)			(chip->batt_info_base + 0x09)
+#define BATT_INFO_INT_RT_STS(chip)		(chip->batt_info_base + 0x10)
+#define BATT_INFO_BATT_REM_LATCH(chip)		(chip->batt_info_base + 0x4F)
+#define BATT_INFO_BATT_TEMP_LSB(chip)		(chip->batt_info_base + 0x50)
+#define BATT_INFO_BATT_TEMP_MSB(chip)		(chip->batt_info_base + 0x51)
+#define BATT_INFO_BATT_TEMP_CFG(chip)		(chip->batt_info_base + 0x56)
+#define BATT_INFO_BATT_TMPR_INTR(chip)		(chip->batt_info_base + 0x59)
+#define BATT_INFO_THERM_C1(chip)		(chip->batt_info_base + 0x5C)
+#define BATT_INFO_THERM_C2(chip)		(chip->batt_info_base + 0x5D)
+#define BATT_INFO_THERM_C3(chip)		(chip->batt_info_base + 0x5E)
+#define BATT_INFO_THERM_HALF_RANGE(chip)	(chip->batt_info_base + 0x5F)
+#define BATT_INFO_JEITA_CTLS(chip)		(chip->batt_info_base + 0x61)
+#define BATT_INFO_JEITA_TOO_COLD(chip)		(chip->batt_info_base + 0x62)
+#define BATT_INFO_JEITA_COLD(chip)		(chip->batt_info_base + 0x63)
+#define BATT_INFO_JEITA_HOT(chip)		(chip->batt_info_base + 0x64)
+#define BATT_INFO_JEITA_TOO_HOT(chip)		(chip->batt_info_base + 0x65)
+
+/* only for v1.1 */
+#define BATT_INFO_ESR_CFG(chip)			(chip->batt_info_base + 0x69)
+/* starting from v2.0 */
+#define BATT_INFO_ESR_GENERAL_CFG(chip)		(chip->batt_info_base + 0x68)
+#define BATT_INFO_ESR_PULL_DN_CFG(chip)		(chip->batt_info_base + 0x69)
+#define BATT_INFO_ESR_FAST_CRG_CFG(chip)	(chip->batt_info_base + 0x6A)
+
+#define BATT_INFO_BATT_MISS_CFG(chip)		(chip->batt_info_base + 0x6B)
+#define BATT_INFO_WATCHDOG_COUNT(chip)		(chip->batt_info_base + 0x70)
+#define BATT_INFO_WATCHDOG_CFG(chip)		(chip->batt_info_base + 0x71)
+#define BATT_INFO_IBATT_SENSING_CFG(chip)	(chip->batt_info_base + 0x73)
+#define BATT_INFO_QNOVO_CFG(chip)		(chip->batt_info_base + 0x74)
+#define BATT_INFO_QNOVO_SCALER(chip)		(chip->batt_info_base + 0x75)
+
+/* starting from v2.0 */
+#define BATT_INFO_CRG_SERVICES(chip)		(chip->batt_info_base + 0x90)
+
+/* The following LSB/MSB addresses are for v2.0 and above; v1.1 has them swapped */
+#define BATT_INFO_VBATT_LSB(chip)		(chip->batt_info_base + 0xA0)
+#define BATT_INFO_VBATT_MSB(chip)		(chip->batt_info_base + 0xA1)
+#define BATT_INFO_IBATT_LSB(chip)		(chip->batt_info_base + 0xA2)
+#define BATT_INFO_IBATT_MSB(chip)		(chip->batt_info_base + 0xA3)
+#define BATT_INFO_ESR_LSB(chip)			(chip->batt_info_base + 0xA4)
+#define BATT_INFO_ESR_MSB(chip)			(chip->batt_info_base + 0xA5)
+#define BATT_INFO_VBATT_LSB_CP(chip)		(chip->batt_info_base + 0xA6)
+#define BATT_INFO_VBATT_MSB_CP(chip)		(chip->batt_info_base + 0xA7)
+#define BATT_INFO_IBATT_LSB_CP(chip)		(chip->batt_info_base + 0xA8)
+#define BATT_INFO_IBATT_MSB_CP(chip)		(chip->batt_info_base + 0xA9)
+#define BATT_INFO_ESR_LSB_CP(chip)		(chip->batt_info_base + 0xAA)
+#define BATT_INFO_ESR_MSB_CP(chip)		(chip->batt_info_base + 0xAB)
+#define BATT_INFO_VADC_LSB(chip)		(chip->batt_info_base + 0xAC)
+#define BATT_INFO_VADC_MSB(chip)		(chip->batt_info_base + 0xAD)
+#define BATT_INFO_IADC_LSB(chip)		(chip->batt_info_base + 0xAE)
+#define BATT_INFO_IADC_MSB(chip)		(chip->batt_info_base + 0xAF)
+#define BATT_INFO_TM_MISC(chip)			(chip->batt_info_base + 0xE5)
+#define BATT_INFO_TM_MISC1(chip)		(chip->batt_info_base + 0xE6)
+
+/* BATT_INFO_BATT_TEMP_STS */
+#define JEITA_TOO_HOT_STS_BIT			BIT(7)
+#define JEITA_HOT_STS_BIT			BIT(6)
+#define JEITA_COLD_STS_BIT			BIT(5)
+#define JEITA_TOO_COLD_STS_BIT			BIT(4)
+#define BATT_TEMP_DELTA_BIT			BIT(1)
+#define BATT_TEMP_AVAIL_BIT			BIT(0)
+
+/* BATT_INFO_SYS_BATT */
+#define BATT_REM_LATCH_STS_BIT			BIT(4)
+#define BATT_MISSING_HW_BIT			BIT(2)
+#define BATT_MISSING_ALG_BIT			BIT(1)
+#define BATT_MISSING_CMP_BIT			BIT(0)
+
+/* BATT_INFO_FG_STS */
+#define FG_WD_RESET_BIT				BIT(7)
+/* This bit is not present in v1.1 */
+#define FG_CRG_TRM_BIT				BIT(0)
+
+/* BATT_INFO_INT_RT_STS */
+#define BT_TMPR_DELTA_BIT			BIT(6)
+#define WDOG_EXP_BIT				BIT(5)
+#define BT_ATTN_BIT				BIT(4)
+#define BT_MISS_BIT				BIT(3)
+#define ESR_DELTA_BIT				BIT(2)
+#define VBT_LOW_BIT				BIT(1)
+#define VBT_PRD_DELTA_BIT			BIT(0)
+
+/* BATT_INFO_BATT_REM_LATCH */
+#define BATT_REM_LATCH_CLR_BIT			BIT(7)
+
+/* BATT_INFO_BATT_TEMP_LSB/MSB */
+#define BATT_TEMP_LSB_MASK			GENMASK(7, 0)
+#define BATT_TEMP_MSB_MASK			GENMASK(2, 0)
+
+/* BATT_INFO_BATT_TEMP_CFG */
+#define JEITA_TEMP_HYST_MASK			GENMASK(5, 4)
+#define JEITA_TEMP_HYST_SHIFT			4
+#define JEITA_TEMP_NO_HYST			0x0
+#define JEITA_TEMP_HYST_1C			0x1
+#define JEITA_TEMP_HYST_2C			0x2
+#define JEITA_TEMP_HYST_3C			0x3
+
+/* BATT_INFO_BATT_TMPR_INTR */
+#define CHANGE_THOLD_MASK			GENMASK(1, 0)
+#define BTEMP_DELTA_2K				0x0
+#define BTEMP_DELTA_4K				0x1
+#define BTEMP_DELTA_6K				0x2
+#define BTEMP_DELTA_10K				0x3
+
+/* BATT_INFO_THERM_C1/C2/C3 */
+#define BATT_INFO_THERM_COEFF_MASK		GENMASK(7, 0)
+
+/* BATT_INFO_THERM_HALF_RANGE */
+#define BATT_INFO_THERM_TEMP_MASK		GENMASK(7, 0)
+
+/* BATT_INFO_JEITA_CTLS */
+#define JEITA_STS_CLEAR_BIT			BIT(0)
+
+/* BATT_INFO_JEITA_TOO_COLD/COLD/HOT/TOO_HOT */
+#define JEITA_THOLD_MASK			GENMASK(7, 0)
+
+/* BATT_INFO_ESR_CFG */
+#define CFG_ACTIVE_PD_MASK			GENMASK(2, 1)
+#define CFG_FCC_DEC_MASK			GENMASK(4, 3)
+
+/* BATT_INFO_ESR_GENERAL_CFG */
+#define ESR_DEEP_TAPER_EN_BIT			BIT(0)
+
+/* BATT_INFO_ESR_PULL_DN_CFG */
+#define ESR_PULL_DOWN_IVAL_MASK			GENMASK(3, 2)
+#define ESR_MEAS_CUR_60MA			0x0
+#define ESR_MEAS_CUR_120MA			0x1
+#define ESR_MEAS_CUR_180MA			0x2
+#define ESR_MEAS_CUR_240MA			0x3
+#define ESR_PULL_DOWN_MODE_MASK			GENMASK(1, 0)
+#define ESR_NO_PULL_DOWN			0x0
+#define ESR_STATIC_PULL_DOWN			0x1
+#define ESR_CRG_DSC_PULL_DOWN			0x2
+#define ESR_DSC_PULL_DOWN			0x3
+
+/* BATT_INFO_ESR_FAST_CRG_CFG */
+#define ESR_FAST_CRG_IVAL_MASK			GENMASK(3, 1)
+#define ESR_FCC_300MA				0x0
+#define ESR_FCC_600MA				0x1
+#define ESR_FCC_1A				0x2
+#define ESR_FCC_2A				0x3
+#define ESR_FCC_3A				0x4
+#define ESR_FCC_4A				0x5
+#define ESR_FCC_5A				0x6
+#define ESR_FCC_6A				0x7
+#define ESR_FAST_CRG_CTL_EN_BIT			BIT(0)
+
+/* BATT_INFO_BATT_MISS_CFG */
+#define BM_THERM_TH_MASK			GENMASK(5, 4)
+#define RES_TH_0P75_MOHM			0x0
+#define RES_TH_1P00_MOHM			0x1
+#define RES_TH_1P50_MOHM			0x2
+#define RES_TH_3P00_MOHM			0x3
+#define BM_BATT_ID_TH_MASK			GENMASK(3, 2)
+#define BM_FROM_THERM_BIT			BIT(1)
+#define BM_FROM_BATT_ID_BIT			BIT(0)
+
+/* BATT_INFO_WATCHDOG_COUNT */
+#define WATCHDOG_COUNTER			GENMASK(7, 0)
+
+/* BATT_INFO_WATCHDOG_CFG */
+#define RESET_CAPABLE_BIT			BIT(2)
+#define PET_CTRL_BIT				BIT(1)
+#define ENABLE_CTRL_BIT				BIT(0)
+
+/* BATT_INFO_IBATT_SENSING_CFG */
+#define ADC_BITSTREAM_INV_BIT			BIT(4)
+#define SOURCE_SELECT_MASK			GENMASK(1, 0)
+#define SRC_SEL_BATFET				0x0
+#define SRC_SEL_BATFET_SMB			0x2
+#define SRC_SEL_RESERVED			0x3
+
+/* BATT_INFO_QNOVO_CFG */
+#define LD_REG_FORCE_CTL_BIT			BIT(2)
+#define LD_REG_CTRL_FORCE_HIGH			LD_REG_FORCE_CTL_BIT
+#define LD_REG_CTRL_FORCE_LOW			0
+#define LD_REG_CTRL_BIT				BIT(1)
+#define LD_REG_CTRL_REGISTER			LD_REG_CTRL_BIT
+#define LD_REG_CTRL_LOGIC			0
+#define BIT_STREAM_CFG_BIT			BIT(0)
+
+/* BATT_INFO_QNOVO_SCALER */
+#define QNOVO_SCALER_MASK			GENMASK(7, 0)
+
+/* BATT_INFO_CRG_SERVICES */
+#define FG_CRC_TRM_EN_BIT			BIT(0)
+
+/* BATT_INFO_VBATT_LSB/MSB */
+#define VBATT_MASK				GENMASK(7, 0)
+
+/* BATT_INFO_IBATT_LSB/MSB */
+#define IBATT_MASK				GENMASK(7, 0)
+
+/* BATT_INFO_ESR_LSB/MSB */
+#define ESR_LSB_MASK				GENMASK(7, 0)
+#define ESR_MSB_MASK				GENMASK(5, 0)
+
+/* BATT_INFO_VADC_LSB/MSB */
+#define VADC_LSB_MASK				GENMASK(7, 0)
+#define VADC_MSB_MASK				GENMASK(6, 0)
+
+/* BATT_INFO_IADC_LSB/MSB */
+#define IADC_LSB_MASK				GENMASK(7, 0)
+#define IADC_MSB_MASK				GENMASK(6, 0)
+
+/* BATT_INFO_TM_MISC */
+#define FORCE_SEQ_RESP_TOGGLE_BIT		BIT(6)
+#define ALG_DIRECT_VALID_DATA_BIT		BIT(5)
+#define ALG_DIRECT_MODE_EN_BIT			BIT(4)
+#define BATT_VADC_CONV_BIT			BIT(3)
+#define BATT_IADC_CONV_BIT			BIT(2)
+#define ADC_ENABLE_REG_CTRL_BIT			BIT(1)
+#define WDOG_FORCE_EXP_BIT			BIT(0)
+/* only for v1.1 */
+#define ESR_PULSE_FORCE_CTRL_BIT		BIT(7)
+
+/* BATT_INFO_TM_MISC1 */
+/* for v2.0 and above */
+#define ESR_REQ_CTL_BIT				BIT(1)
+#define ESR_REQ_CTL_EN_BIT			BIT(0)
+
+/* FG_MEM_IF register and bit definitions */
+#define MEM_IF_INT_RT_STS(chip)			((chip->mem_if_base) + 0x10)
+#define MEM_IF_MEM_INTF_CFG(chip)		((chip->mem_if_base) + 0x50)
+#define MEM_IF_IMA_CTL(chip)			((chip->mem_if_base) + 0x51)
+#define MEM_IF_IMA_CFG(chip)			((chip->mem_if_base) + 0x52)
+#define MEM_IF_IMA_OPR_STS(chip)		((chip->mem_if_base) + 0x54)
+#define MEM_IF_IMA_EXP_STS(chip)		((chip->mem_if_base) + 0x55)
+#define MEM_IF_IMA_HW_STS(chip)			((chip->mem_if_base) + 0x56)
+#define MEM_IF_FG_BEAT_COUNT(chip)		((chip->mem_if_base) + 0x57)
+#define MEM_IF_IMA_ERR_STS(chip)		((chip->mem_if_base) + 0x5F)
+#define MEM_IF_IMA_BYTE_EN(chip)		((chip->mem_if_base) + 0x60)
+#define MEM_IF_ADDR_LSB(chip)			((chip->mem_if_base) + 0x61)
+#define MEM_IF_ADDR_MSB(chip)			((chip->mem_if_base) + 0x62)
+#define MEM_IF_WR_DATA0(chip)			((chip->mem_if_base) + 0x63)
+#define MEM_IF_WR_DATA3(chip)			((chip->mem_if_base) + 0x66)
+#define MEM_IF_RD_DATA0(chip)			((chip->mem_if_base) + 0x67)
+#define MEM_IF_RD_DATA3(chip)			((chip->mem_if_base) + 0x6A)
+#define MEM_IF_DMA_STS(chip)			((chip->mem_if_base) + 0x70)
+#define MEM_IF_DMA_CTL(chip)			((chip->mem_if_base) + 0x71)
+
+/* MEM_IF_INT_RT_STS */
+#define MEM_XCP_BIT				BIT(1)
+
+/* MEM_IF_MEM_INTF_CFG */
+#define MEM_ACCESS_REQ_BIT			BIT(7)
+#define IACS_SLCT_BIT				BIT(5)
+
+/* MEM_IF_IMA_CTL */
+#define MEM_ACS_BURST_BIT			BIT(7)
+#define IMA_WR_EN_BIT				BIT(6)
+#define IMA_CTL_MASK				GENMASK(7, 6)
+
+/* MEM_IF_IMA_CFG */
+#define IACS_CLR_BIT				BIT(2)
+#define IACS_INTR_SRC_SLCT_BIT			BIT(3)
+#define STATIC_CLK_EN_BIT			BIT(4)
+
+/* MEM_IF_IMA_OPR_STS */
+#define IACS_RDY_BIT				BIT(1)
+
+/* MEM_IF_IMA_EXP_STS */
+#define IACS_ERR_BIT				BIT(0)
+#define XCT_TYPE_ERR_BIT			BIT(1)
+#define DATA_RD_ERR_BIT				BIT(3)
+#define DATA_WR_ERR_BIT				BIT(4)
+#define ADDR_BURST_WRAP_BIT			BIT(5)
+#define ADDR_STABLE_ERR_BIT			BIT(7)
+
+/* MEM_IF_IMA_ERR_STS */
+#define ADDR_STBL_ERR_BIT			BIT(7)
+#define WR_ACS_ERR_BIT				BIT(6)
+#define RD_ACS_ERR_BIT				BIT(5)
+
+/* MEM_IF_FG_BEAT_COUNT */
+#define BEAT_COUNT_MASK				GENMASK(3, 0)
+
+/* MEM_IF_DMA_STS */
+#define DMA_WRITE_ERROR_BIT			BIT(1)
+#define DMA_READ_ERROR_BIT			BIT(2)
+
+/* MEM_IF_DMA_CTL */
+#define DMA_CLEAR_LOG_BIT			BIT(0)
+#endif
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
new file mode 100644
index 0000000..839a771
--- /dev/null
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -0,0 +1,901 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "fg-core.h"
+
+void fg_circ_buf_add(struct fg_circ_buf *buf, int val)
+{
+	buf->arr[buf->head] = val;
+	buf->head = (buf->head + 1) % ARRAY_SIZE(buf->arr);
+	buf->size = min(++buf->size, (int)ARRAY_SIZE(buf->arr));
+}
+
+void fg_circ_buf_clr(struct fg_circ_buf *buf)
+{
+	memset(buf, 0, sizeof(*buf));
+}
+
+int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg)
+{
+	s64 result = 0;
+	int i;
+
+	if (buf->size == 0)
+		return -ENODATA;
+
+	for (i = 0; i < buf->size; i++)
+		result += buf->arr[i];
+
+	*avg = div_s64(result, buf->size);
+	return 0;
+}
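fg_circ_buf_add() overwrites the oldest sample once the 20-entry window is full, and fg_circ_buf_avg() averages only the samples stored so far. A standalone sketch of the same ring-average behaviour (illustrative only; plain C, independent of the driver types):

#include <stdio.h>
#include <string.h>

#define WINDOW	20

struct ring {
	int arr[WINDOW];
	int size;
	int head;
};

/* Add a sample, overwriting the oldest one once the window is full. */
static void ring_add(struct ring *r, int val)
{
	r->arr[r->head] = val;
	r->head = (r->head + 1) % WINDOW;
	if (r->size < WINDOW)
		r->size++;
}

/* Average only the samples actually stored so far. */
static int ring_avg(const struct ring *r, int *avg)
{
	long long sum = 0;
	int i;

	if (!r->size)
		return -1;	/* no samples yet */
	for (i = 0; i < r->size; i++)
		sum += r->arr[i];
	*avg = (int)(sum / r->size);
	return 0;
}

int main(void)
{
	struct ring r;
	int avg, i;

	memset(&r, 0, sizeof(r));
	for (i = 1; i <= 5; i++)
		ring_add(&r, i * 100);
	if (!ring_avg(&r, &avg))
		printf("avg = %d\n", avg);	/* prints avg = 300 */
	return 0;
}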
+
+int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input, s32 *output)
+{
+	int i;
+	s64 temp;
+
+	if (pts == NULL) {
+		pr_err("Table is NULL\n");
+		return -EINVAL;
+	}
+
+	if (tablesize < 1) {
+		pr_err("Table has no entries\n");
+		return -ENOENT;
+	}
+
+	if (tablesize == 1) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (pts[0].x > pts[1].x) {
+		pr_err("Table is not in ascending order\n");
+		return -EINVAL;
+	}
+
+	if (input <= pts[0].x) {
+		*output = pts[0].y;
+		return 0;
+	}
+
+	if (input >= pts[tablesize - 1].x) {
+		*output = pts[tablesize - 1].y;
+		return 0;
+	}
+
+	for (i = 1; i < tablesize; i++) {
+		if (input >= pts[i].x)
+			continue;
+
+		temp = (s64)(pts[i].y - pts[i - 1].y) *
+						(s64)(input - pts[i - 1].x);
+		temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+		*output = temp + pts[i - 1].y;
+		return 0;
+	}
+
+	return -EINVAL;
+}
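fg_lerp() performs piecewise linear interpolation over a table sorted by ascending x and clamps at both ends; paired with fg_ln_table it approximates 1000 * ln(x / 1000). For an input of 3000, which falls between (2000, 693) and (4000, 1386), it returns 693 + 693 * 1000 / 2000 = 1039, a chord approximation of the exact 1000 * ln(3) ≈ 1099. A standalone sketch of the same arithmetic (illustrative only; names are made up):

#include <stdio.h>

struct pt {
	long x;
	long y;
};

/* Same knots as fg_ln_table: y ~= 1000 * ln(x / 1000) at each point. */
static const struct pt ln_tbl[] = {
	{ 1000, 0 }, { 2000, 693 }, { 4000, 1386 }, { 6000, 1792 },
	{ 8000, 2079 }, { 16000, 2773 }, { 32000, 3466 },
	{ 64000, 4159 }, { 128000, 4852 },
};

/* Piecewise linear interpolation over an x-ascending table, clamped at
 * both ends, mirroring the arithmetic in fg_lerp().
 */
static long lerp(const struct pt *p, int n, long in)
{
	int i;

	if (in <= p[0].x)
		return p[0].y;
	if (in >= p[n - 1].x)
		return p[n - 1].y;
	for (i = 1; i < n; i++) {
		if (in < p[i].x)
			return p[i - 1].y + (p[i].y - p[i - 1].y) *
			       (in - p[i - 1].x) / (p[i].x - p[i - 1].x);
	}
	return p[n - 1].y;
}

int main(void)
{
	/* 693 + 693 * 1000 / 2000 = 1039 for an input of 3000 */
	printf("%ld\n", lerp(ln_tbl, 9, 3000));
	return 0;
}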
+
+static struct fg_dbgfs dbgfs_data = {
+	.help_msg = {
+	.data =
+	"FG Debug-FS support\n"
+	"\n"
+	"Hierarchy schema:\n"
+	"/sys/kernel/debug/fg_sram\n"
+	"       /help    -- Static help text\n"
+	"       /address -- Starting register address for reads or writes\n"
+	"       /count   -- Number of registers to read (only used for reads)\n"
+	"       /data    -- Initiates the SRAM read (formatted output)\n"
+	"\n",
+	},
+};
+
+static bool is_usb_present(struct fg_chip *chip)
+{
+	union power_supply_propval pval = {0, };
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+
+	if (chip->usb_psy)
+		power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT, &pval);
+	else
+		return false;
+
+	return pval.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+	union power_supply_propval pval = {0, };
+
+	if (!chip->dc_psy)
+		chip->dc_psy = power_supply_get_by_name("dc");
+
+	if (chip->dc_psy)
+		power_supply_get_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_PRESENT, &pval);
+	else
+		return false;
+
+	return pval.intval != 0;
+}
+
+bool is_input_present(struct fg_chip *chip)
+{
+	return is_usb_present(chip) || is_dc_present(chip);
+}
+
+#define EXPONENT_SHIFT		11
+#define EXPONENT_OFFSET		-9
+#define MANTISSA_SIGN_BIT	10
+#define MICRO_UNIT		1000000
+s64 fg_float_decode(u16 val)
+{
+	s8 exponent;
+	s32 mantissa;
+
+	/* mantissa bits are shifted out during sign extension */
+	exponent = ((s16)val >> EXPONENT_SHIFT) + EXPONENT_OFFSET;
+	/* exponent bits are shifted out during sign extension */
+	mantissa = sign_extend32(val, MANTISSA_SIGN_BIT) * MICRO_UNIT;
+
+	if (exponent < 0)
+		return (s64)mantissa >> -exponent;
+
+	return (s64)mantissa << exponent;
+}
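fg_float_decode() unpacks a 16-bit register value as a 5-bit signed exponent in bits 15:11 (offset by -9) and an 11-bit signed mantissa in bits 10:0, and returns the result in micro-units. For example, an exponent field of 7 with a mantissa of 5 decodes to 5 * 1000000 >> 2 = 1250000. A user-space sketch of the same decode with the sign extension written out explicitly (illustrative only, not part of this patch):

#include <stdio.h>
#include <stdint.h>

#define EXPONENT_SHIFT		11
#define EXPONENT_OFFSET		-9
#define MICRO_UNIT		1000000

/* Decode the 16-bit FG float format: 5-bit signed exponent in bits 15:11
 * (offset by -9), 11-bit signed mantissa in bits 10:0, result returned
 * in micro-units.
 */
static int64_t float_decode(uint16_t val)
{
	int exponent = (val >> EXPONENT_SHIFT) & 0x1f;
	int32_t mantissa = val & 0x7ff;

	if (exponent & 0x10)		/* sign-extend the 5-bit exponent */
		exponent -= 0x20;
	exponent += EXPONENT_OFFSET;

	if (mantissa & 0x400)		/* sign-extend the 11-bit mantissa */
		mantissa -= 0x800;
	mantissa *= MICRO_UNIT;

	if (exponent < 0)
		return (int64_t)mantissa >> -exponent;
	return (int64_t)mantissa << exponent;
}

int main(void)
{
	/* exponent field 7 -> 7 - 9 = -2, mantissa 5:
	 * 5 * 1000000 >> 2 = 1250000 micro-units
	 */
	printf("%lld\n", (long long)float_decode((7 << 11) | 5));
	return 0;
}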
+
+void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+	int pos = 0;
+	int i;
+
+	for (i = 0; i < buf_len; i++) {
+		pos += scnprintf(str + pos, str_len - pos, "%02x", buf[i]);
+		if (i < buf_len - 1)
+			pos += scnprintf(str + pos, str_len - pos, " ");
+	}
+}
+
+void dump_sram(u8 *buf, int addr, int len)
+{
+	int i;
+	char str[16];
+
+	/*
+	 * The length passed should be a multiple of 4 as each FG SRAM word
+	 * holds 4 bytes. To keep this simple, even if a length that is not
+	 * a multiple of 4 bytes (or is less than 4 bytes) is passed, the
+	 * SRAM registers are always dumped in multiples of 4 bytes.
+	 */
+	for (i = 0; i < len; i += 4) {
+		str[0] = '\0';
+		fill_string(str, sizeof(str), buf + i, 4);
+		pr_info("%03d %s\n", addr + (i / 4), str);
+	}
+}
+
+static inline bool fg_sram_address_valid(u16 address, int len)
+{
+	if (address > FG_SRAM_ADDRESS_MAX)
+		return false;
+
+	if ((address + DIV_ROUND_UP(len, 4)) > FG_SRAM_ADDRESS_MAX + 1)
+		return false;
+
+	return true;
+}
+
+#define SOC_UPDATE_WAIT_MS	1500
+int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags)
+{
+	int rc = 0;
+	bool tried_again = false;
+	bool atomic_access = false;
+
+	if (!chip)
+		return -ENXIO;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	if (!fg_sram_address_valid(address, len))
+		return -EFAULT;
+
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_WRITE, true, 0);
+	mutex_lock(&chip->sram_rw_lock);
+
+	if ((flags & FG_IMA_ATOMIC) && chip->irqs[SOC_UPDATE_IRQ].irq) {
+		/*
+		 * This interrupt needs to be enabled only when it is
+		 * required. It is kept disabled at other times.
+		 */
+		reinit_completion(&chip->soc_update);
+		enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
+		atomic_access = true;
+	} else {
+		flags = FG_IMA_DEFAULT;
+	}
+wait:
+	/*
+	 * Atomic access means waiting for the SOC_UPDATE interrupt from
+	 * FG_ALG and doing the transaction after that. This makes sure
+	 * that no SOC update happens while an IMA write is in progress.
+	 * The SOC_UPDATE interrupt fires every FG cycle (~1.47 seconds).
+	 */
+	if (atomic_access) {
+		/* Wait for SOC_UPDATE completion */
+		rc = wait_for_completion_interruptible_timeout(
+			&chip->soc_update,
+			msecs_to_jiffies(SOC_UPDATE_WAIT_MS));
+
+		/* If we were interrupted, wait one more time. */
+		if (rc == -ERESTARTSYS && !tried_again) {
+			tried_again = true;
+			goto wait;
+		} else if (rc <= 0) {
+			pr_err("wait for soc_update timed out rc=%d\n", rc);
+			goto out;
+		}
+	}
+
+	rc = fg_interleaved_mem_write(chip, address, offset, val, len,
+			atomic_access);
+	if (rc < 0)
+		pr_err("Error in writing SRAM address 0x%x[%d], rc=%d\n",
+			address, offset, rc);
+out:
+	if (atomic_access)
+		disable_irq_nosync(chip->irqs[SOC_UPDATE_IRQ].irq);
+
+	mutex_unlock(&chip->sram_rw_lock);
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_WRITE, false, 0);
+	return rc;
+}
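A hedged caller sketch for fg_sram_write(): FG_IMA_DEFAULT performs a plain IMA write, while FG_IMA_ATOMIC additionally waits for the next SOC_UPDATE completion so the write cannot race an FG cycle. Only the call signature and flag names below come from this patch; the helper name, address and offset are hypothetical:

/*
 * Illustrative caller sketch only (not part of this patch): write one byte
 * at a hypothetical SRAM word/byte offset.
 */
static int example_set_byte(struct fg_chip *chip, u8 value)
{
	u16 addr = 0x10;	/* hypothetical SRAM word address */
	u8 offset = 2;		/* byte within the 4-byte SRAM word */
	int rc;

	rc = fg_sram_write(chip, addr, offset, &value, 1, FG_IMA_ATOMIC);
	if (rc < 0)
		pr_err("example write failed, rc=%d\n", rc);

	return rc;
}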
+
+int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
+			u8 *val, int len, int flags)
+{
+	int rc = 0;
+
+	if (!chip)
+		return -ENXIO;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	if (!fg_sram_address_valid(address, len))
+		return -EFAULT;
+
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_READ, true, 0);
+	mutex_lock(&chip->sram_rw_lock);
+
+	rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+	if (rc < 0)
+		pr_err("Error in reading SRAM address 0x%x[%d], rc=%d\n",
+			address, offset, rc);
+
+	mutex_unlock(&chip->sram_rw_lock);
+	if (!(flags & FG_IMA_NO_WLOCK))
+		vote(chip->awake_votable, SRAM_READ, false, 0);
+	return rc;
+}
+
+int fg_sram_masked_write(struct fg_chip *chip, u16 address, u8 offset,
+			u8 mask, u8 val, int flags)
+{
+	int rc = 0;
+	u8 buf[4];
+
+	rc = fg_sram_read(chip, address, 0, buf, 4, flags);
+	if (rc < 0) {
+		pr_err("sram read failed: address=%03X, rc=%d\n", address, rc);
+		return rc;
+	}
+
+	buf[offset] &= ~mask;
+	buf[offset] |= val & mask;
+
+	rc = fg_sram_write(chip, address, 0, buf, 4, flags);
+	if (rc < 0) {
+		pr_err("sram write failed: address=%03X, rc=%d\n", address, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int fg_read(struct fg_chip *chip, int addr, u8 *val, int len)
+{
+	int rc, i;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	rc = regmap_bulk_read(chip->regmap, addr, val, len);
+
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_read failed for address %04x rc=%d\n",
+			addr, rc);
+		return rc;
+	}
+
+	if (*chip->debug_mask & FG_BUS_READ) {
+		pr_info("length %d addr=%04x\n", len, addr);
+		for (i = 0; i < len; i++)
+			pr_info("val[%d]: %02x\n", i, val[i]);
+	}
+
+	return 0;
+}
+
+int fg_write(struct fg_chip *chip, int addr, u8 *val, int len)
+{
+	int rc, i;
+	bool sec_access = false;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	mutex_lock(&chip->bus_lock);
+	sec_access = (addr & 0x00FF) > 0xD0;
+	if (sec_access) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			dev_err(chip->dev, "regmap_write failed for address %x rc=%d\n",
+				addr, rc);
+			goto out;
+		}
+	}
+
+	if (len > 1)
+		rc = regmap_bulk_write(chip->regmap, addr, val, len);
+	else
+		rc = regmap_write(chip->regmap, addr, *val);
+
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_write failed for address %04x rc=%d\n",
+			addr, rc);
+		goto out;
+	}
+
+	if (*chip->debug_mask & FG_BUS_WRITE) {
+		pr_info("length %d addr=%04x\n", len, addr);
+		for (i = 0; i < len; i++)
+			pr_info("val[%d]: %02x\n", i, val[i]);
+	}
+out:
+	mutex_unlock(&chip->bus_lock);
+	return rc;
+}
+
+int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val)
+{
+	int rc;
+	bool sec_access = false;
+
+	if (!chip || !chip->regmap)
+		return -ENXIO;
+
+	mutex_lock(&chip->bus_lock);
+	sec_access = (addr & 0x00FF) > 0xD0;
+	if (sec_access) {
+		rc = regmap_write(chip->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			dev_err(chip->dev, "regmap_write failed for address %x rc=%d\n",
+				addr, rc);
+			goto out;
+		}
+	}
+
+	rc = regmap_update_bits(chip->regmap, addr, mask, val);
+	if (rc < 0) {
+		dev_err(chip->dev, "regmap_update_bits failed for address %04x rc=%d\n",
+			addr, rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_BUS_WRITE, "addr=%04x mask: %02x val: %02x\n", addr,
+		mask, val);
+out:
+	mutex_unlock(&chip->bus_lock);
+	return rc;
+}
+
+int64_t twos_compliment_extend(int64_t val, int sign_bit_pos)
+{
+	int i, nbytes = DIV_ROUND_UP(sign_bit_pos, 8);
+	int64_t mask, val_out;
+
+	val_out = val;
+	mask = 1 << sign_bit_pos;
+	if (val & mask) {
+		for (i = 8; i > nbytes; i--) {
+			mask = 0xFFLL << ((i - 1) * 8);
+			val_out |= mask;
+		}
+
+		if ((nbytes * 8) - 1 > sign_bit_pos) {
+			mask = 1 << sign_bit_pos;
+			for (i = 1; i <= (nbytes * 8) - sign_bit_pos; i++)
+				val_out |= mask << i;
+		}
+	}
+
+	pr_debug("nbytes: %d val: %llx val_out: %llx\n", nbytes, val, val_out);
+	return val_out;
+}
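twos_compliment_extend() sign-extends a field whose sign bit sits at sign_bit_pos; for example the 12-bit raw value 0xFFF (sign bit 11) extends to -1. A standalone sketch of the same contract written with a single mask instead of the byte-wise loop (illustrative only; valid for sign bit positions below 63):

#include <stdio.h>
#include <stdint.h>

/* Sign-extend a field whose sign bit sits at 'sign_bit_pos', assuming
 * the bits above the sign bit are zero on entry.
 */
static int64_t sign_extend(int64_t val, int sign_bit_pos)
{
	int64_t sign = 1LL << sign_bit_pos;

	return (val & sign) ? val | -(sign << 1) : val;
}

int main(void)
{
	/* A 12-bit two's-complement field: 0xFFF is -1, 0x801 is -2047. */
	printf("%lld %lld\n",
	       (long long)sign_extend(0xFFF, 11),
	       (long long)sign_extend(0x801, 11));
	return 0;
}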
+
+/* All the debugfs related functions are defined below */
+static int fg_sram_dfs_open(struct inode *inode, struct file *file)
+{
+	struct fg_log_buffer *log;
+	struct fg_trans *trans;
+	u8 *data_buf;
+
+	size_t logbufsize = SZ_4K;
+	size_t databufsize = SZ_4K;
+
+	if (!dbgfs_data.chip) {
+		pr_err("debugfs data is not initialized\n");
+		return -EINVAL;
+	}
+
+	/* Per file "transaction" data */
+	trans = devm_kzalloc(dbgfs_data.chip->dev, sizeof(*trans), GFP_KERNEL);
+	if (!trans)
+		return -ENOMEM;
+
+	/* Allocate log buffer */
+	log = devm_kzalloc(dbgfs_data.chip->dev, logbufsize, GFP_KERNEL);
+	if (!log)
+		return -ENOMEM;
+
+	log->rpos = 0;
+	log->wpos = 0;
+	log->len = logbufsize - sizeof(*log);
+
+	/* Allocate data buffer */
+	data_buf = devm_kzalloc(dbgfs_data.chip->dev, databufsize, GFP_KERNEL);
+	if (!data_buf)
+		return -ENOMEM;
+
+	trans->log = log;
+	trans->data = data_buf;
+	trans->cnt = dbgfs_data.cnt;
+	trans->addr = dbgfs_data.addr;
+	trans->chip = dbgfs_data.chip;
+	trans->offset = trans->addr;
+	mutex_init(&trans->fg_dfs_lock);
+
+	file->private_data = trans;
+	return 0;
+}
+
+static int fg_sram_dfs_close(struct inode *inode, struct file *file)
+{
+	struct fg_trans *trans = file->private_data;
+
+	if (trans && trans->log && trans->data) {
+		file->private_data = NULL;
+		mutex_destroy(&trans->fg_dfs_lock);
+		devm_kfree(trans->chip->dev, trans->log);
+		devm_kfree(trans->chip->dev, trans->data);
+		devm_kfree(trans->chip->dev, trans);
+	}
+
+	return 0;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to @log buffer
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...)
+{
+	va_list args;
+	int cnt;
+	char *buf = &log->data[log->wpos];
+	size_t size = log->len - log->wpos;
+
+	va_start(args, fmt);
+	cnt = vscnprintf(buf, size, fmt, args);
+	va_end(args);
+
+	log->wpos += cnt;
+	return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ * @offset: SRAM address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable.  Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 12-bit SRAM address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read from the SRAM.  When the cnt reaches 0, all requested bytes have
+ * been read.
+ */
+static int write_next_line_to_log(struct fg_trans *trans, int offset,
+				size_t *pcnt)
+{
+	int i;
+	u8 data[ITEMS_PER_LINE];
+	u16 address;
+	struct fg_log_buffer *log = trans->log;
+	int cnt = 0;
+	int items_to_read = min(ARRAY_SIZE(data), *pcnt);
+	int items_to_log = min(ITEMS_PER_LINE, items_to_read);
+
+	/* Buffer needs enough space for an entire line */
+	if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+		goto done;
+
+	memcpy(data, trans->data + (offset - trans->addr), items_to_read);
+	*pcnt -= items_to_read;
+
+	/* The address is now in words and increments by 1. */
+	address = trans->addr + ((offset - trans->addr) / ITEMS_PER_LINE);
+	cnt = print_to_log(log, "%3.3d ", address & 0xfff);
+	if (cnt == 0)
+		goto done;
+
+	/* Log the data items */
+	for (i = 0; i < items_to_log; ++i) {
+		cnt = print_to_log(log, "%2.2X ", data[i]);
+		if (cnt == 0)
+			goto done;
+	}
+
+	/* If the last character was a space, then replace it with a newline */
+	if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+		log->data[log->wpos - 1] = '\n';
+
+done:
+	return cnt;
+}
+
+/**
+ * get_log_data - reads data from SRAM and saves to the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct fg_trans *trans)
+{
+	int cnt, rc;
+	int last_cnt;
+	int items_read;
+	int total_items_read = 0;
+	u32 offset = trans->offset;
+	size_t item_cnt = trans->cnt;
+	struct fg_log_buffer *log = trans->log;
+
+	if (item_cnt == 0)
+		return 0;
+
+	if (item_cnt > SZ_4K) {
+		pr_err("Reading too many bytes\n");
+		return -EINVAL;
+	}
+
+	pr_debug("addr: %d offset: %d count: %d\n", trans->addr, trans->offset,
+		trans->cnt);
+	rc = fg_sram_read(trans->chip, trans->addr, 0,
+			trans->data, trans->cnt, 0);
+	if (rc < 0) {
+		pr_err("SRAM read failed: rc = %d\n", rc);
+		return rc;
+	}
+	/* Reset the log buffer 'pointers' */
+	log->wpos = log->rpos = 0;
+
+	/* Keep reading data until the log is full */
+	do {
+		last_cnt = item_cnt;
+		cnt = write_next_line_to_log(trans, offset, &item_cnt);
+		items_read = last_cnt - item_cnt;
+		offset += items_read;
+		total_items_read += items_read;
+	} while (cnt && item_cnt > 0);
+
+	/* Adjust the transaction offset and count */
+	trans->cnt = item_cnt;
+	trans->offset += total_items_read;
+
+	return total_items_read;
+}
+
+/**
+ * fg_sram_dfs_reg_read: reads value(s) from SRAM and fills the user's buffer
+ *  with a byte array (coded as a string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t fg_sram_dfs_reg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct fg_trans *trans = file->private_data;
+	struct fg_log_buffer *log = trans->log;
+	size_t ret;
+	size_t len;
+
+	mutex_lock(&trans->fg_dfs_lock);
+	/* Is the log buffer empty? */
+	if (log->rpos >= log->wpos) {
+		if (get_log_data(trans) <= 0) {
+			len = 0;
+			goto unlock_mutex;
+		}
+	}
+
+	len = min(count, log->wpos - log->rpos);
+
+	ret = copy_to_user(buf, &log->data[log->rpos], len);
+	if (ret == len) {
+		pr_err("error copy sram register values to user\n");
+		len = -EFAULT;
+		goto unlock_mutex;
+	}
+
+	/* 'ret' is the number of bytes not copied */
+	len -= ret;
+
+	*ppos += len;
+	log->rpos += len;
+
+unlock_mutex:
+	mutex_unlock(&trans->fg_dfs_lock);
+	return len;
+}
+
+/**
+ * fg_sram_dfs_reg_write: write user's byte array (coded as string) to SRAM.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes written, or negative error value
+ */
+static ssize_t fg_sram_dfs_reg_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int bytes_read;
+	int data;
+	int pos = 0;
+	int cnt = 0;
+	u8  *values;
+	char *kbuf;
+	size_t ret = 0;
+	struct fg_trans *trans = file->private_data;
+	u32 address = trans->addr;
+
+	mutex_lock(&trans->fg_dfs_lock);
+	/* Make a copy of the user data */
+	kbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!kbuf) {
+		ret = -ENOMEM;
+		goto unlock_mutex;
+	}
+
+	ret = copy_from_user(kbuf, buf, count);
+	if (ret == count) {
+		pr_err("failed to copy data from user\n");
+		ret = -EFAULT;
+		goto free_buf;
+	}
+
+	count -= ret;
+	*ppos += count;
+	kbuf[count] = '\0';
+
+	/* Reuse the text buffer to hold the parsed raw data */
+	values = kbuf;
+
+	/* Parse the data in the buffer.  It should be a string of numbers */
+	while ((pos < count) &&
+		sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+		/*
+		 * No single parsed value should consume more than 5
+		 * characters for this to remain functionally correct. Also
+		 * make sure that pos never overflows beyond INT_MAX.
+		 */
+		if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
+			cnt = 0;
+			ret = -EINVAL;
+			break;
+		}
+		pos += bytes_read;
+		values[cnt++] = data & 0xff;
+	}
+
+	if (!cnt)
+		goto free_buf;
+
+	pr_debug("address %d, count %d\n", address, cnt);
+	/* Perform the write(s) */
+
+	ret = fg_sram_write(trans->chip, address, 0, values, cnt, 0);
+	if (ret) {
+		pr_err("SRAM write failed, err = %zu\n", ret);
+	} else {
+		ret = count;
+		trans->offset += cnt > 4 ? 4 : cnt;
+	}
+
+free_buf:
+	kfree(kbuf);
+unlock_mutex:
+	mutex_unlock(&trans->fg_dfs_lock);
+	return ret;
+}
+
+static const struct file_operations fg_sram_dfs_reg_fops = {
+	.open		= fg_sram_dfs_open,
+	.release	= fg_sram_dfs_close,
+	.read		= fg_sram_dfs_reg_read,
+	.write		= fg_sram_dfs_reg_write,
+};
+
+/*
+ * fg_sram_debugfs_create: adds the fg_sram debugfs entries
+ * @return zero on success
+ */
+static int fg_sram_debugfs_create(struct fg_chip *chip)
+{
+	struct dentry *dfs_sram;
+	struct dentry *file;
+	mode_t dfs_mode = 0600;
+
+	pr_debug("Creating FG_SRAM debugfs file-system\n");
+	dfs_sram = debugfs_create_dir("sram", chip->dfs_root);
+	if (!dfs_sram) {
+		pr_err("error creating fg sram dfs rc=%ld\n",
+		       (long)dfs_sram);
+		return -ENOMEM;
+	}
+
+	dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+	file = debugfs_create_blob("help", 0444, dfs_sram,
+					&dbgfs_data.help_msg);
+	if (!file) {
+		pr_err("error creating help entry\n");
+		goto err_remove_fs;
+	}
+
+	dbgfs_data.chip = chip;
+
+	file = debugfs_create_u32("count", dfs_mode, dfs_sram,
+					&(dbgfs_data.cnt));
+	if (!file) {
+		pr_err("error creating 'count' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_x32("address", dfs_mode, dfs_sram,
+					&(dbgfs_data.addr));
+	if (!file) {
+		pr_err("error creating 'address' entry\n");
+		goto err_remove_fs;
+	}
+
+	file = debugfs_create_file("data", dfs_mode, dfs_sram, &dbgfs_data,
+					&fg_sram_dfs_reg_fops);
+	if (!file) {
+		pr_err("error creating 'data' entry\n");
+		goto err_remove_fs;
+	}
+
+	return 0;
+
+err_remove_fs:
+	debugfs_remove_recursive(dfs_sram);
+	return -ENOMEM;
+}
+
+static int fg_alg_flags_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t fg_alg_flags_read(struct file *file, char __user *userbuf,
+				 size_t count, loff_t *ppos)
+{
+	struct fg_chip *chip = file->private_data;
+	char buf[512];
+	u8 alg_flags = 0;
+	int rc, i, len;
+
+	rc = fg_sram_read(chip, chip->sp[FG_SRAM_ALG_FLAGS].addr_word,
+			  chip->sp[FG_SRAM_ALG_FLAGS].addr_byte, &alg_flags, 1,
+			  FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to read algorithm flags rc=%d\n", rc);
+		return -EFAULT;
+	}
+
+	len = 0;
+	for (i = 0; i < ALG_FLAG_MAX; ++i) {
+		if (len > ARRAY_SIZE(buf) - 1)
+			return -EFAULT;
+		if (chip->alg_flags[i].invalid)
+			continue;
+
+		len += snprintf(buf + len, sizeof(buf) - sizeof(*buf) * len,
+				"%s = %d\n", chip->alg_flags[i].name,
+				(bool)(alg_flags & chip->alg_flags[i].bit));
+	}
+
+	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fg_alg_flags_fops = {
+	.open = fg_alg_flags_open,
+	.read = fg_alg_flags_read,
+};
+
+int fg_debugfs_create(struct fg_chip *chip)
+{
+	int rc;
+
+	pr_debug("Creating debugfs file-system\n");
+	chip->dfs_root = debugfs_create_dir("fg", NULL);
+	if (IS_ERR_OR_NULL(chip->dfs_root)) {
+		if (PTR_ERR(chip->dfs_root) == -ENODEV)
+			pr_err("debugfs is not enabled in the kernel\n");
+		else
+			pr_err("error creating fg dfs root rc=%ld\n",
+			       (long)chip->dfs_root);
+		return -ENODEV;
+	}
+
+	rc = fg_sram_debugfs_create(chip);
+	if (rc < 0) {
+		pr_err("failed to create sram dfs rc=%d\n", rc);
+		goto err_remove_fs;
+	}
+
+	if (!debugfs_create_file("alg_flags", 0400, chip->dfs_root, chip,
+				 &fg_alg_flags_fops)) {
+		pr_err("failed to create alg_flags file\n");
+		goto err_remove_fs;
+	}
+
+	return 0;
+
+err_remove_fs:
+	debugfs_remove_recursive(chip->dfs_root);
+	return -ENOMEM;
+}
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
new file mode 100644
index 0000000..39a0dcb6
--- /dev/null
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -0,0 +1,662 @@
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "pmic-voter.h"
+
+#define NUM_MAX_CLIENTS	8
+#define DEBUG_FORCE_CLIENT	"DEBUG_FORCE_CLIENT"
+
+static DEFINE_SPINLOCK(votable_list_slock);
+static LIST_HEAD(votable_list);
+
+static struct dentry *debug_root;
+
+struct client_vote {
+	bool	enabled;
+	int	value;
+};
+
+struct votable {
+	const char		*name;
+	struct list_head	list;
+	struct client_vote	votes[NUM_MAX_CLIENTS];
+	int			num_clients;
+	int			type;
+	int			effective_client_id;
+	int			effective_result;
+	struct mutex		vote_lock;
+	void			*data;
+	int			(*callback)(struct votable *votable,
+						void *data,
+						int effective_result,
+						const char *effective_client);
+	char			*client_strs[NUM_MAX_CLIENTS];
+	bool			voted_on;
+	struct dentry		*root;
+	struct dentry		*status_ent;
+	u32			force_val;
+	struct dentry		*force_val_ent;
+	bool			force_active;
+	struct dentry		*force_active_ent;
+};
+
+/**
+ * vote_set_any()
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	sets 0 or 1 based on the voting
+ * @eff_id:	Always returns the client_id argument
+ *
+ * Note that for a SET_ANY votable, the value is always the same as enabled.
+ * There is no notion of a voter abstaining from the election, so the
+ * effective_id is never invalid during an election.
+ *
+ * Context:
+ *	Must be called with the votable->vote_lock held
+ */
+static void vote_set_any(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = 0;
+
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++)
+		*eff_res |= votable->votes[i].enabled;
+
+	*eff_id = client_id;
+}
+
+/**
+ * vote_min() -
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	set to the minimum of all the values amongst enabled voters.
+ *		If there is no enabled client, this is set to -EINVAL
+ * @eff_id:	sets this to the client id that has the min value amongst all
+ *		the enabled clients. If there is no enabled client, sets this
+ *		to -EINVAL
+ *
+ * Context:
+ *	Must be called with the votable->vote_lock held
+ */
+static void vote_min(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = INT_MAX;
+	*eff_id = -EINVAL;
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++) {
+		if (votable->votes[i].enabled
+			&& *eff_res > votable->votes[i].value) {
+			*eff_res = votable->votes[i].value;
+			*eff_id = i;
+		}
+	}
+	if (*eff_id == -EINVAL)
+		*eff_res = -EINVAL;
+}
+
+/**
+ * vote_max() -
+ * @votable:	votable object
+ * @client_id:	client number of the latest voter
+ * @eff_res:	sets this to the max. of all the values amongst enabled voters.
+ *		If there is no enabled client, this is set to -EINVAL
+ * @eff_id:	sets this to the client id that has the max value amongst all
+ *		the enabled clients. If there is no enabled client, sets this to
+ *		-EINVAL
+ *
+ * Context:
+ *	Must be called with the votable->vote_lock held
+ */
+static void vote_max(struct votable *votable, int client_id,
+				int *eff_res, int *eff_id)
+{
+	int i;
+
+	*eff_res = INT_MIN;
+	*eff_id = -EINVAL;
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++) {
+		if (votable->votes[i].enabled &&
+				*eff_res < votable->votes[i].value) {
+			*eff_res = votable->votes[i].value;
+			*eff_id = i;
+		}
+	}
+	if (*eff_id == -EINVAL)
+		*eff_res = -EINVAL;
+}
+
+static int get_client_id(struct votable *votable, const char *client_str)
+{
+	int i;
+
+	for (i = 0; i < votable->num_clients; i++) {
+		if (votable->client_strs[i]
+		 && (strcmp(votable->client_strs[i], client_str) == 0))
+			return i;
+	}
+
+	/* new client */
+	for (i = 0; i < votable->num_clients; i++) {
+		if (!votable->client_strs[i]) {
+			votable->client_strs[i]
+				= kstrdup(client_str, GFP_KERNEL);
+			if (!votable->client_strs[i])
+				return -ENOMEM;
+			return i;
+		}
+	}
+	return -EINVAL;
+}
+
+static char *get_client_str(struct votable *votable, int client_id)
+{
+	if (client_id == -EINVAL)
+		return NULL;
+
+	return votable->client_strs[client_id];
+}
+
+void lock_votable(struct votable *votable)
+{
+	mutex_lock(&votable->vote_lock);
+}
+
+void unlock_votable(struct votable *votable)
+{
+	mutex_unlock(&votable->vote_lock);
+}
+
+/**
+ * get_client_vote() -
+ * get_client_vote_locked() -
+ *		The unlocked and locked variants of getting a client's voted
+ *		value.
+ * @votable:	the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ *	The value the client voted for. -EINVAL is returned if the client
+ *	is not enabled or the client is not found.
+ */
+int get_client_vote_locked(struct votable *votable, const char *client_str)
+{
+	int client_id = get_client_id(votable, client_str);
+
+	if (client_id < 0)
+		return -EINVAL;
+
+	if ((votable->type != VOTE_SET_ANY)
+		&& !votable->votes[client_id].enabled)
+		return -EINVAL;
+
+	return votable->votes[client_id].value;
+}
+
+int get_client_vote(struct votable *votable, const char *client_str)
+{
+	int value;
+
+	lock_votable(votable);
+	value = get_client_vote_locked(votable, client_str);
+	unlock_votable(votable);
+	return value;
+}
+
+/**
+ * get_effective_result() -
+ * get_effective_result_locked() -
+ *		The unlocked and locked variants of getting the effective value
+ *		amongst all the enabled voters.
+ *
+ * @votable:	the votable object
+ *
+ * Returns:
+ *	The effective result.
+ *	For a MIN or MAX votable, returns -EINVAL when the votable
+ *	object has been created but no clients have cast their votes or
+ *	when the last enabled client disables its vote.
+ *	For a SET_ANY votable it returns 0 when no clients have cast their
+ *	votes, because for SET_ANY there is no concept of abstaining from the
+ *	election; the votes of all clients of a SET_ANY votable default to false.
+ */
+int get_effective_result_locked(struct votable *votable)
+{
+	if (votable->force_active)
+		return votable->force_val;
+
+	return votable->effective_result;
+}
+
+int get_effective_result(struct votable *votable)
+{
+	int value;
+
+	lock_votable(votable);
+	value = get_effective_result_locked(votable);
+	unlock_votable(votable);
+	return value;
+}
+
+/**
+ * get_effective_client() -
+ * get_effective_client_locked() -
+ *		The unlocked and locked variants of getting the effective client
+ *		amongst all the enabled voters.
+ *
+ * @votable:	the votable object
+ *
+ * Returns:
+ *	The effective client.
+ *	For a MIN or MAX votable, returns NULL when the votable
+ *	object has been created but no clients have cast their votes or
+ *	when the last enabled client disables its vote.
+ *	For a SET_ANY votable it also returns NULL when no clients have cast
+ *	their votes. Because SET_ANY has no concept of abstaining from the
+ *	election, once votes exist the client that cast the only vote, or the
+ *	client that caused the result to change, is returned.
+ */
+const char *get_effective_client_locked(struct votable *votable)
+{
+	if (votable->force_active)
+		return DEBUG_FORCE_CLIENT;
+
+	return get_client_str(votable, votable->effective_client_id);
+}
+
+const char *get_effective_client(struct votable *votable)
+{
+	const char *client_str;
+
+	lock_votable(votable);
+	client_str = get_effective_client_locked(votable);
+	unlock_votable(votable);
+	return client_str;
+}
+
+/**
+ * vote() -
+ *
+ * @votable:	the votable object
+ * @client_str: the voting client
+ * @enabled:	This provides a means for the client to exclude itself from
+ *		the election. The client's val (the next argument) is
+ *		considered only when the client has enabled its participation.
+ *		Note that this takes a different meaning for the SET_ANY type,
+ *		as there is no concept of abstaining from participation:
+ *		enabled is treated as the boolean value the client is voting.
+ * @val:	The vote value. This is ignored for SET_ANY votable types.
+ *		For MIN and MAX votable types this value is used as the
+ *		client's vote value when enabled is true and is ignored when
+ *		enabled is false.
+ *
+ * The callback is called only when there is a change in the election results or
+ * if it is the first time someone is voting.
+ *
+ * Returns:
+ *	The return from the callback when present and needs to be called
+ *	or zero.
+ */
+int vote(struct votable *votable, const char *client_str, bool enabled, int val)
+{
+	int effective_id = -EINVAL;
+	int effective_result;
+	int client_id;
+	int rc = 0;
+	bool similar_vote = false;
+
+	lock_votable(votable);
+
+	client_id = get_client_id(votable, client_str);
+	if (client_id < 0) {
+		rc = client_id;
+		goto out;
+	}
+
+	/*
+	 * for SET_ANY the val is to be ignored, set it
+	 * to enabled so that the election still works based on
+	 * value regardless of the type
+	 */
+	if (votable->type == VOTE_SET_ANY)
+		val = enabled;
+
+	if ((votable->votes[client_id].enabled == enabled) &&
+		(votable->votes[client_id].value == val)) {
+		pr_debug("%s: %s,%d same vote %s of val=%d\n",
+				votable->name,
+				client_str, client_id,
+				enabled ? "on" : "off",
+				val);
+		similar_vote = true;
+	}
+
+	votable->votes[client_id].enabled = enabled;
+	votable->votes[client_id].value = val;
+
+	if (similar_vote && votable->voted_on) {
+		pr_debug("%s: %s,%d Ignoring similar vote %s of val=%d\n",
+			votable->name,
+			client_str, client_id, enabled ? "on" : "off", val);
+		goto out;
+	}
+
+	pr_debug("%s: %s,%d voting %s of val=%d\n",
+		votable->name,
+		client_str, client_id, enabled ? "on" : "off", val);
+	switch (votable->type) {
+	case VOTE_MIN:
+		vote_min(votable, client_id, &effective_result, &effective_id);
+		break;
+	case VOTE_MAX:
+		vote_max(votable, client_id, &effective_result, &effective_id);
+		break;
+	case VOTE_SET_ANY:
+		vote_set_any(votable, client_id,
+				&effective_result, &effective_id);
+		break;
+	default:
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Note that the callback is called with a NULL string and -EINVAL
+	 * result when there are no enabled votes
+	 */
+	if (!votable->voted_on
+			|| (effective_result != votable->effective_result)) {
+		votable->effective_client_id = effective_id;
+		votable->effective_result = effective_result;
+		pr_debug("%s: effective vote is now %d voted by %s,%d\n",
+			votable->name, effective_result,
+			get_client_str(votable, effective_id),
+			effective_id);
+		if (votable->callback && !votable->force_active)
+			rc = votable->callback(votable, votable->data,
+					effective_result,
+					get_client_str(votable, effective_id));
+	}
+
+	votable->voted_on = true;
+out:
+	unlock_votable(votable);
+	return rc;
+}
+
+int rerun_election(struct votable *votable)
+{
+	int rc = 0;
+
+	lock_votable(votable);
+	if (votable->callback)
+		rc = votable->callback(votable,
+				votable->data,
+			votable->effective_result,
+			get_client_str(votable, votable->effective_client_id));
+	unlock_votable(votable);
+	return rc;
+}
+
+struct votable *find_votable(const char *name)
+{
+	unsigned long flags;
+	struct votable *v;
+	bool found = false;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	if (list_empty(&votable_list))
+		goto out;
+
+	list_for_each_entry(v, &votable_list, list) {
+		if (strcmp(v->name, name) == 0) {
+			found = true;
+			break;
+		}
+	}
+out:
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	if (found)
+		return v;
+	else
+		return NULL;
+}
+
+static int force_active_get(void *data, u64 *val)
+{
+	struct votable *votable = data;
+
+	*val = votable->force_active;
+
+	return 0;
+}
+
+static int force_active_set(void *data, u64 val)
+{
+	struct votable *votable = data;
+	int rc = 0;
+
+	lock_votable(votable);
+	votable->force_active = !!val;
+
+	if (!votable->callback)
+		goto out;
+
+	if (votable->force_active) {
+		rc = votable->callback(votable, votable->data,
+			votable->force_val,
+			DEBUG_FORCE_CLIENT);
+	} else {
+		rc = votable->callback(votable, votable->data,
+			votable->effective_result,
+			get_client_str(votable, votable->effective_client_id));
+	}
+out:
+	unlock_votable(votable);
+	return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(votable_force_ops, force_active_get, force_active_set,
+		"%lld\n");
+
+static int show_votable_clients(struct seq_file *m, void *data)
+{
+	struct votable *votable = m->private;
+	int i;
+	char *type_str = "Unknown";
+	const char *effective_client_str;
+
+	lock_votable(votable);
+
+	seq_printf(m, "Votable %s:\n", votable->name);
+	seq_puts(m, "clients:\n");
+	for (i = 0; i < votable->num_clients; i++) {
+		if (votable->client_strs[i]) {
+			seq_printf(m, "%-15s:\t\ten=%d\t\tv=%d\n",
+					votable->client_strs[i],
+					votable->votes[i].enabled,
+					votable->votes[i].value);
+		}
+	}
+
+	switch (votable->type) {
+	case VOTE_MIN:
+		type_str = "Min";
+		break;
+	case VOTE_MAX:
+		type_str = "Max";
+		break;
+	case VOTE_SET_ANY:
+		type_str = "Set_any";
+		break;
+	}
+
+	seq_printf(m, "type: %s\n", type_str);
+	seq_puts(m, "Effective:\n");
+	effective_client_str = get_effective_client_locked(votable);
+	seq_printf(m, "%-15s:\t\tv=%d\n",
+			effective_client_str ? effective_client_str : "none",
+			get_effective_result_locked(votable));
+	unlock_votable(votable);
+
+	return 0;
+}
+
+static int votable_status_open(struct inode *inode, struct file *file)
+{
+	struct votable *votable = inode->i_private;
+
+	return single_open(file, show_votable_clients, votable);
+}
+
+static const struct file_operations votable_status_ops = {
+	.owner		= THIS_MODULE,
+	.open		= votable_status_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+struct votable *create_votable(const char *name,
+				int votable_type,
+				int (*callback)(struct votable *votable,
+					void *data,
+					int effective_result,
+					const char *effective_client),
+				void *data)
+{
+	struct votable *votable;
+	unsigned long flags;
+
+	votable = find_votable(name);
+	if (votable)
+		return ERR_PTR(-EEXIST);
+
+	if (debug_root == NULL) {
+		debug_root = debugfs_create_dir("pmic-votable", NULL);
+		if (!debug_root) {
+			pr_err("Couldn't create debug dir\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (votable_type >= NUM_VOTABLE_TYPES) {
+		pr_err("Invalid votable_type specified for voter\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	votable = kzalloc(sizeof(struct votable), GFP_KERNEL);
+	if (!votable)
+		return ERR_PTR(-ENOMEM);
+
+	votable->name = kstrdup(name, GFP_KERNEL);
+	if (!votable->name) {
+		kfree(votable);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	votable->num_clients = NUM_MAX_CLIENTS;
+	votable->callback = callback;
+	votable->type = votable_type;
+	votable->data = data;
+	mutex_init(&votable->vote_lock);
+
+	/*
+	 * Because effective_result and client states are invalid
+	 * before the first vote, initialize them to -EINVAL
+	 */
+	votable->effective_result = -EINVAL;
+	if (votable->type == VOTE_SET_ANY)
+		votable->effective_result = 0;
+	votable->effective_client_id = -EINVAL;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	list_add(&votable->list, &votable_list);
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	votable->root = debugfs_create_dir(name, debug_root);
+	if (!votable->root) {
+		pr_err("Couldn't create debug dir %s\n", name);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	votable->status_ent = debugfs_create_file("status", S_IFREG | 0444,
+				  votable->root, votable,
+				  &votable_status_ops);
+	if (!votable->status_ent) {
+		pr_err("Couldn't create status dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	votable->force_val_ent = debugfs_create_u32("force_val",
+					S_IFREG | 0644,
+					votable->root,
+					&(votable->force_val));
+
+	if (!votable->force_val_ent) {
+		pr_err("Couldn't create force_val dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	votable->force_active_ent = debugfs_create_file("force_active",
+					S_IFREG | 0444,
+					votable->root, votable,
+					&votable_force_ops);
+	if (!votable->force_active_ent) {
+		pr_err("Couldn't create force_active dbg file for %s\n", name);
+		debugfs_remove_recursive(votable->root);
+		kfree(votable->name);
+		kfree(votable);
+		return ERR_PTR(-EEXIST);
+	}
+
+	return votable;
+}
+
+void destroy_votable(struct votable *votable)
+{
+	unsigned long flags;
+	int i;
+
+	if (!votable)
+		return;
+
+	spin_lock_irqsave(&votable_list_slock, flags);
+	list_del(&votable->list);
+	spin_unlock_irqrestore(&votable_list_slock, flags);
+
+	debugfs_remove_recursive(votable->root);
+
+	for (i = 0; i < votable->num_clients && votable->client_strs[i]; i++)
+		kfree(votable->client_strs[i]);
+
+	kfree(votable->name);
+	kfree(votable);
+}
diff --git a/drivers/power/supply/qcom/pmic-voter.h b/drivers/power/supply/qcom/pmic-voter.h
new file mode 100644
index 0000000..031b9a0
--- /dev/null
+++ b/drivers/power/supply/qcom/pmic-voter.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PMIC_VOTER_H
+#define __PMIC_VOTER_H
+
+#include <linux/mutex.h>
+
+struct votable;
+
+enum votable_type {
+	VOTE_MIN,
+	VOTE_MAX,
+	VOTE_SET_ANY,
+	NUM_VOTABLE_TYPES,
+};
+
+int get_client_vote(struct votable *votable, const char *client_str);
+int get_client_vote_locked(struct votable *votable, const char *client_str);
+int get_effective_result(struct votable *votable);
+int get_effective_result_locked(struct votable *votable);
+const char *get_effective_client(struct votable *votable);
+const char *get_effective_client_locked(struct votable *votable);
+int vote(struct votable *votable, const char *client_str, bool state, int val);
+int rerun_election(struct votable *votable);
+struct votable *find_votable(const char *name);
+struct votable *create_votable(const char *name,
+				int votable_type,
+				int (*callback)(struct votable *votable,
+						void *data,
+						int effective_result,
+						const char *effective_client),
+				void *data);
+void destroy_votable(struct votable *votable);
+void lock_votable(struct votable *votable);
+void unlock_votable(struct votable *votable);
+
+#endif /* __PMIC_VOTER_H */
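
For orientation, here is a minimal usage sketch of the votable API declared
above. It is illustrative only: the votable name "USB_ICL", the client strings
"USER" and "THERMAL", icl_vote_callback() and the chip pointer are hypothetical
and not taken from this patch.

	static int icl_vote_callback(struct votable *votable, void *data,
				     int effective_result, const char *client)
	{
		/* called with -EINVAL/NULL client when no enabled votes remain */
		pr_debug("effective ICL: %d (from %s)\n",
			 effective_result, client ? client : "none");
		return 0;
	}

	struct votable *icl_votable;

	icl_votable = create_votable("USB_ICL", VOTE_MIN,
				     icl_vote_callback, chip);
	if (IS_ERR(icl_votable))
		return PTR_ERR(icl_votable);

	/* the minimum value amongst enabled clients wins */
	vote(icl_votable, "USER", true, 3000000);
	vote(icl_votable, "THERMAL", true, 1500000);	/* effective: 1500000 */
	vote(icl_votable, "THERMAL", false, 0);		/* effective: 3000000 */

The callback runs only on the first vote or when the effective result changes,
as documented for vote() above.
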
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
new file mode 100644
index 0000000..304d0cf
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -0,0 +1,4262 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"FG: %s: " fmt, __func__
+
+#include <linux/ktime.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_batterydata.h>
+#include <linux/platform_device.h>
+#include <linux/iio/consumer.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "fg-core.h"
+#include "fg-reg.h"
+
+#define FG_GEN3_DEV_NAME	"qcom,fg-gen3"
+
+#define PERPH_SUBTYPE_REG		0x05
+#define FG_BATT_SOC_PMI8998		0x10
+#define FG_BATT_INFO_PMI8998		0x11
+#define FG_MEM_INFO_PMI8998		0x0D
+
+/* SRAM address and offset in ascending order */
+#define SLOPE_LIMIT_WORD		3
+#define SLOPE_LIMIT_OFFSET		0
+#define CUTOFF_VOLT_WORD		5
+#define CUTOFF_VOLT_OFFSET		0
+#define SYS_TERM_CURR_WORD		6
+#define SYS_TERM_CURR_OFFSET		0
+#define VBATT_FULL_WORD			7
+#define VBATT_FULL_OFFSET		0
+#define ESR_FILTER_WORD			8
+#define ESR_UPD_TIGHT_OFFSET		0
+#define ESR_UPD_BROAD_OFFSET		1
+#define ESR_UPD_TIGHT_LOW_TEMP_OFFSET	2
+#define ESR_UPD_BROAD_LOW_TEMP_OFFSET	3
+#define KI_COEFF_MED_DISCHG_WORD	9
+#define KI_COEFF_MED_DISCHG_OFFSET	3
+#define KI_COEFF_HI_DISCHG_WORD		10
+#define KI_COEFF_HI_DISCHG_OFFSET	0
+#define KI_COEFF_LOW_DISCHG_WORD	10
+#define KI_COEFF_LOW_DISCHG_OFFSET	2
+#define DELTA_MSOC_THR_WORD		12
+#define DELTA_MSOC_THR_OFFSET		3
+#define DELTA_BSOC_THR_WORD		13
+#define DELTA_BSOC_THR_OFFSET		2
+#define RECHARGE_SOC_THR_WORD		14
+#define RECHARGE_SOC_THR_OFFSET		0
+#define CHG_TERM_CURR_WORD		14
+#define CHG_TERM_CURR_OFFSET		1
+#define EMPTY_VOLT_WORD			15
+#define EMPTY_VOLT_OFFSET		0
+#define VBATT_LOW_WORD			15
+#define VBATT_LOW_OFFSET		1
+#define ESR_TIMER_DISCHG_MAX_WORD	17
+#define ESR_TIMER_DISCHG_MAX_OFFSET	0
+#define ESR_TIMER_DISCHG_INIT_WORD	17
+#define ESR_TIMER_DISCHG_INIT_OFFSET	2
+#define ESR_TIMER_CHG_MAX_WORD		18
+#define ESR_TIMER_CHG_MAX_OFFSET	0
+#define ESR_TIMER_CHG_INIT_WORD		18
+#define ESR_TIMER_CHG_INIT_OFFSET	2
+#define PROFILE_LOAD_WORD		24
+#define PROFILE_LOAD_OFFSET		0
+#define ESR_RSLOW_DISCHG_WORD		34
+#define ESR_RSLOW_DISCHG_OFFSET		0
+#define ESR_RSLOW_CHG_WORD		51
+#define ESR_RSLOW_CHG_OFFSET		0
+#define NOM_CAP_WORD			58
+#define NOM_CAP_OFFSET			0
+#define ACT_BATT_CAP_BKUP_WORD		74
+#define ACT_BATT_CAP_BKUP_OFFSET	0
+#define CYCLE_COUNT_WORD		75
+#define CYCLE_COUNT_OFFSET		0
+#define PROFILE_INTEGRITY_WORD		79
+#define SW_CONFIG_OFFSET		0
+#define PROFILE_INTEGRITY_OFFSET	3
+#define BATT_SOC_WORD			91
+#define BATT_SOC_OFFSET			0
+#define FULL_SOC_WORD			93
+#define FULL_SOC_OFFSET			2
+#define MONOTONIC_SOC_WORD		94
+#define MONOTONIC_SOC_OFFSET		2
+#define CC_SOC_WORD			95
+#define CC_SOC_OFFSET			0
+#define CC_SOC_SW_WORD			96
+#define CC_SOC_SW_OFFSET		0
+#define VOLTAGE_PRED_WORD		97
+#define VOLTAGE_PRED_OFFSET		0
+#define OCV_WORD			97
+#define OCV_OFFSET			2
+#define ESR_WORD			99
+#define ESR_OFFSET			0
+#define RSLOW_WORD			101
+#define RSLOW_OFFSET			0
+#define ACT_BATT_CAP_WORD		117
+#define ACT_BATT_CAP_OFFSET		0
+#define LAST_BATT_SOC_WORD		119
+#define LAST_BATT_SOC_OFFSET		0
+#define LAST_MONOTONIC_SOC_WORD		119
+#define LAST_MONOTONIC_SOC_OFFSET	2
+#define ALG_FLAGS_WORD			120
+#define ALG_FLAGS_OFFSET		1
+
+/* v2 SRAM address and offset in ascending order */
+#define KI_COEFF_LOW_DISCHG_v2_WORD	9
+#define KI_COEFF_LOW_DISCHG_v2_OFFSET	3
+#define KI_COEFF_MED_DISCHG_v2_WORD	10
+#define KI_COEFF_MED_DISCHG_v2_OFFSET	0
+#define KI_COEFF_HI_DISCHG_v2_WORD	10
+#define KI_COEFF_HI_DISCHG_v2_OFFSET	1
+#define DELTA_BSOC_THR_v2_WORD		12
+#define DELTA_BSOC_THR_v2_OFFSET	3
+#define DELTA_MSOC_THR_v2_WORD		13
+#define DELTA_MSOC_THR_v2_OFFSET	0
+#define RECHARGE_SOC_THR_v2_WORD	14
+#define RECHARGE_SOC_THR_v2_OFFSET	1
+#define CHG_TERM_CURR_v2_WORD		15
+#define CHG_TERM_CURR_v2_OFFSET		1
+#define EMPTY_VOLT_v2_WORD		15
+#define EMPTY_VOLT_v2_OFFSET		3
+#define VBATT_LOW_v2_WORD		16
+#define VBATT_LOW_v2_OFFSET		0
+#define RECHARGE_VBATT_THR_v2_WORD	16
+#define RECHARGE_VBATT_THR_v2_OFFSET	1
+#define FLOAT_VOLT_v2_WORD		16
+#define FLOAT_VOLT_v2_OFFSET		2
+
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_value_16b(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_default(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val);
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int value);
+static void fg_encode_voltage(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val_mv, u8 *buf);
+static void fg_encode_current(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val_ma, u8 *buf);
+static void fg_encode_default(struct fg_sram_param *sp,
+	enum fg_sram_param_id id, int val, u8 *buf);
+
+static struct fg_irq_info fg_irqs[FG_IRQ_MAX];
+
+#define PARAM(_id, _addr_word, _addr_byte, _len, _num, _den, _offset,	\
+	      _enc, _dec)						\
+	[FG_SRAM_##_id] = {						\
+		.addr_word	= _addr_word,				\
+		.addr_byte	= _addr_byte,				\
+		.len		= _len,					\
+		.numrtr		= _num,					\
+		.denmtr		= _den,					\
+		.offset		= _offset,				\
+		.encode		= _enc,					\
+		.decode		= _dec,					\
+	}								\
+
+static struct fg_sram_param pmi8998_v1_sram_params[] = {
+	PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 1000,
+		244141, 0, NULL, fg_decode_voltage_15b),
+	PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_voltage_15b),
+	PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default,
+		fg_decode_value_16b),
+	PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_value_16b),
+	PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+		1, 1, 0, NULL, fg_decode_default),
+	/* Entries below here are configurable during initialization */
+	PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
+		244141, 0, fg_encode_voltage, NULL),
+	PARAM(EMPTY_VOLT, EMPTY_VOLT_WORD, EMPTY_VOLT_OFFSET, 1, 100000, 390625,
+		-2500, fg_encode_voltage, NULL),
+	PARAM(VBATT_LOW, VBATT_LOW_WORD, VBATT_LOW_OFFSET, 1, 100000, 390625,
+		-2500, fg_encode_voltage, NULL),
+	PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000,
+		244141, 0, fg_encode_voltage, fg_decode_voltage_15b),
+	PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
+	PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
+		100000, 390625, 0, fg_encode_current, NULL),
+	PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_WORD, DELTA_MSOC_THR_OFFSET, 1,
+		2048, 100, 0, fg_encode_default, NULL),
+	PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_WORD, DELTA_BSOC_THR_OFFSET, 1,
+		2048, 100, 0, fg_encode_default, NULL),
+	PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_WORD, RECHARGE_SOC_THR_OFFSET,
+		1, 256, 100, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_DISCHG_MAX, ESR_TIMER_DISCHG_MAX_WORD,
+		ESR_TIMER_DISCHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_DISCHG_INIT, ESR_TIMER_DISCHG_INIT_WORD,
+		ESR_TIMER_DISCHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_CHG_MAX, ESR_TIMER_CHG_MAX_WORD,
+		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
+		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
+		KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD,
+		KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192, 1000,
+		0, fg_encode_default, NULL),
+};
+
+static struct fg_sram_param pmi8998_v2_sram_params[] = {
+	PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(FULL_SOC, FULL_SOC_WORD, FULL_SOC_OFFSET, 2, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 1000,
+		244141, 0, NULL, fg_decode_voltage_15b),
+	PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_voltage_15b),
+	PARAM(ESR, ESR_WORD, ESR_OFFSET, 2, 1000, 244141, 0, fg_encode_default,
+		fg_decode_value_16b),
+	PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 1000, 244141, 0, NULL,
+		fg_decode_value_16b),
+	PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
+		fg_decode_default),
+	PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+		fg_decode_cc_soc),
+	PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+		1, 1, 0, NULL, fg_decode_default),
+	/* Entries below here are configurable during initialization */
+	PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
+		244141, 0, fg_encode_voltage, NULL),
+	PARAM(EMPTY_VOLT, EMPTY_VOLT_v2_WORD, EMPTY_VOLT_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(VBATT_LOW, VBATT_LOW_v2_WORD, VBATT_LOW_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(FLOAT_VOLT, FLOAT_VOLT_v2_WORD, FLOAT_VOLT_v2_OFFSET, 1, 1000,
+		15625, -2000, fg_encode_voltage, NULL),
+	PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000,
+		244141, 0, fg_encode_voltage, fg_decode_voltage_15b),
+	PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
+		1000000, 122070, 0, fg_encode_current, NULL),
+	PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
+		100000, 390625, 0, fg_encode_current, NULL),
+	PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_v2_WORD, DELTA_MSOC_THR_v2_OFFSET,
+		1, 2048, 100, 0, fg_encode_default, NULL),
+	PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_v2_WORD, DELTA_BSOC_THR_v2_OFFSET,
+		1, 2048, 100, 0, fg_encode_default, NULL),
+	PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_v2_WORD,
+		RECHARGE_SOC_THR_v2_OFFSET, 1, 256, 100, 0, fg_encode_default,
+		NULL),
+	PARAM(RECHARGE_VBATT_THR, RECHARGE_VBATT_THR_v2_WORD,
+		RECHARGE_VBATT_THR_v2_OFFSET, 1, 1000, 15625, -2000,
+		fg_encode_voltage, NULL),
+	PARAM(ESR_TIMER_DISCHG_MAX, ESR_TIMER_DISCHG_MAX_WORD,
+		ESR_TIMER_DISCHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_DISCHG_INIT, ESR_TIMER_DISCHG_INIT_WORD,
+		ESR_TIMER_DISCHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default,
+		NULL),
+	PARAM(ESR_TIMER_CHG_MAX, ESR_TIMER_CHG_MAX_WORD,
+		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
+		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
+		KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_v2_WORD,
+		KI_COEFF_HI_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+		fg_encode_default, NULL),
+	PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET,
+		1, 512, 1000000, 0, fg_encode_default, NULL),
+	PARAM(SLOPE_LIMIT, SLOPE_LIMIT_WORD, SLOPE_LIMIT_OFFSET, 1, 8192, 1000,
+		0, fg_encode_default, NULL),
+};
+
+static struct fg_alg_flag pmi8998_v1_alg_flags[] = {
+	[ALG_FLAG_SOC_LT_OTG_MIN]	= {
+		.name	= "SOC_LT_OTG_MIN",
+		.bit	= BIT(0),
+	},
+	[ALG_FLAG_SOC_LT_RECHARGE]	= {
+		.name	= "SOC_LT_RECHARGE",
+		.bit	= BIT(1),
+	},
+	[ALG_FLAG_IBATT_LT_ITERM]	= {
+		.name	= "IBATT_LT_ITERM",
+		.bit	= BIT(2),
+	},
+	[ALG_FLAG_IBATT_GT_HPM]		= {
+		.name	= "IBATT_GT_HPM",
+		.bit	= BIT(3),
+	},
+	[ALG_FLAG_IBATT_GT_UPM]		= {
+		.name	= "IBATT_GT_UPM",
+		.bit	= BIT(4),
+	},
+	[ALG_FLAG_VBATT_LT_RECHARGE]	= {
+		.name	= "VBATT_LT_RECHARGE",
+		.bit	= BIT(5),
+	},
+	[ALG_FLAG_VBATT_GT_VFLOAT]	= {
+		.invalid = true,
+	},
+};
+
+static struct fg_alg_flag pmi8998_v2_alg_flags[] = {
+	[ALG_FLAG_SOC_LT_OTG_MIN]	= {
+		.name	= "SOC_LT_OTG_MIN",
+		.bit	= BIT(0),
+	},
+	[ALG_FLAG_SOC_LT_RECHARGE]	= {
+		.name	= "SOC_LT_RECHARGE",
+		.bit	= BIT(1),
+	},
+	[ALG_FLAG_IBATT_LT_ITERM]	= {
+		.name	= "IBATT_LT_ITERM",
+		.bit	= BIT(2),
+	},
+	[ALG_FLAG_IBATT_GT_HPM]		= {
+		.name	= "IBATT_GT_HPM",
+		.bit	= BIT(4),
+	},
+	[ALG_FLAG_IBATT_GT_UPM]		= {
+		.name	= "IBATT_GT_UPM",
+		.bit	= BIT(5),
+	},
+	[ALG_FLAG_VBATT_LT_RECHARGE]	= {
+		.name	= "VBATT_LT_RECHARGE",
+		.bit	= BIT(6),
+	},
+	[ALG_FLAG_VBATT_GT_VFLOAT]	= {
+		.name	= "VBATT_GT_VFLOAT",
+		.bit	= BIT(7),
+	},
+};
+
+static int fg_gen3_debug_mask;
+module_param_named(
+	debug_mask, fg_gen3_debug_mask, int, 0600
+);
+
+static bool fg_profile_dump;
+module_param_named(
+	profile_dump, fg_profile_dump, bool, 0600
+);
+
+static int fg_sram_dump_period_ms = 20000;
+module_param_named(
+	sram_dump_period_ms, fg_sram_dump_period_ms, int, 0600
+);
+
+static int fg_restart;
+static bool fg_sram_dump;
+
+/* All getters HERE */
+
+#define VOLTAGE_15BIT_MASK	GENMASK(14, 0)
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	value &= VOLTAGE_15BIT_MASK;
+	sp[id].value = div_u64((u64)value * sp[id].denmtr, sp[id].numrtr);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
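+
+/*
+ * Worked example for fg_decode_voltage_15b() (illustrative): a raw value of
+ * 0x3666 (13926) with the OCV/VOLTAGE_PRED scaling (numrtr = 1000,
+ * denmtr = 244141) decodes to 13926 * 244141 / 1000 = 3399907 uV,
+ * i.e. roughly 3400 mV.
+ */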
+
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	sp[id].value = div_s64((s64)value * sp[id].denmtr, sp[id].numrtr);
+	sp[id].value = sign_extend32(sp[id].value, 31);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
+
+static int fg_decode_value_16b(struct fg_sram_param *sp,
+				enum fg_sram_param_id id, int value)
+{
+	sp[id].value = div_u64((u64)(u16)value * sp[id].denmtr, sp[id].numrtr);
+	pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+		sp[id].value);
+	return sp[id].value;
+}
+
+static int fg_decode_default(struct fg_sram_param *sp, enum fg_sram_param_id id,
+				int value)
+{
+	sp[id].value = value;
+	return sp[id].value;
+}
+
+static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
+			int value)
+{
+	if (!sp[id].decode) {
+		pr_err("No decoding function for parameter %d\n", id);
+		return -EINVAL;
+	}
+
+	return sp[id].decode(sp, id, value);
+}
+
+static void fg_encode_voltage(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val_mv, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+
+	val_mv += sp[id].offset;
+	temp = (int64_t)div_u64((u64)val_mv * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val_mv: %d, buf: [ ", temp, id, val_mv);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
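+
+/*
+ * Worked example for fg_encode_voltage() (illustrative, value hypothetical):
+ * encoding a 3400 mV CUTOFF_VOLT with its table entry (numrtr = 1000000,
+ * denmtr = 244141, offset = 0, len = 2):
+ *   temp = 3400 * 1000000 / 244141 = 13926 = 0x3666
+ *   buf  = { 0x66, 0x36 }  (least significant byte first)
+ * i.e. one LSB corresponds to roughly 244.141 uV.
+ */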
+
+static void fg_encode_current(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val_ma, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+	s64 current_ma;
+
+	current_ma = val_ma;
+	temp = (int64_t)div_s64(current_ma * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val_ma);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
+
+static void fg_encode_default(struct fg_sram_param *sp,
+				enum fg_sram_param_id  id, int val, u8 *buf)
+{
+	int i, mask = 0xff;
+	int64_t temp;
+
+	temp = DIV_ROUND_CLOSEST(val * sp[id].numrtr, sp[id].denmtr);
+	pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+	for (i = 0; i < sp[id].len; i++) {
+		buf[i] = temp & mask;
+		temp >>= 8;
+		pr_debug("%x ", buf[i]);
+	}
+	pr_debug("]\n");
+}
+
+static void fg_encode(struct fg_sram_param *sp, enum fg_sram_param_id id,
+			int val, u8 *buf)
+{
+	if (!sp[id].encode) {
+		pr_err("No encoding function for parameter %d\n", id);
+		return;
+	}
+
+	sp[id].encode(sp, id, val, buf);
+}
+
+/*
+ * Please make sure the *_sram_params table has an entry for the parameter
+ * obtained through this function. In addition to the address, offset and
+ * length from which this SRAM parameter is read, a decode function
+ * needs to be specified.
+ */
+static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
+				int *val)
+{
+	int temp, rc, i;
+	u8 buf[4];
+
+	if (id < 0 || id > FG_SRAM_MAX || chip->sp[id].len > sizeof(buf))
+		return -EINVAL;
+
+	if (chip->battery_missing)
+		return -ENODATA;
+
+	rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
+		buf, chip->sp[id].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error reading address 0x%04x[%d] rc=%d\n",
+			chip->sp[id].addr_word, chip->sp[id].addr_byte, rc);
+		return rc;
+	}
+
+	for (i = 0, temp = 0; i < chip->sp[id].len; i++)
+		temp |= buf[i] << (8 * i);
+
+	*val = fg_decode(chip->sp, id, temp);
+	return 0;
+}
+
+#define CC_SOC_30BIT	GENMASK(29, 0)
+static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+{
+	int rc, cc_soc;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC, &cc_soc);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = div_s64(cc_soc * chip->cl.nom_cap_uah, CC_SOC_30BIT);
+	return 0;
+}
+
+static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+{
+	int rc, cc_soc;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = div_s64(cc_soc * chip->cl.learned_cc_uah, CC_SOC_30BIT);
+	return 0;
+}
+
+#define BATT_TEMP_NUMR		1
+#define BATT_TEMP_DENR		1
+static int fg_get_battery_temp(struct fg_chip *chip, int *val)
+{
+	int rc = 0, temp;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_BATT_TEMP_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_BATT_TEMP_LSB(chip), rc);
+		return rc;
+	}
+
+	temp = ((buf[1] & BATT_TEMP_MSB_MASK) << 8) |
+		(buf[0] & BATT_TEMP_LSB_MASK);
+	temp = DIV_ROUND_CLOSEST(temp, 4);
+
+	/* Value is in Kelvin; Convert it to deciDegC */
+	temp = (temp - 273) * 10;
+	*val = temp;
+	return 0;
+}
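+
+/*
+ * Worked example for fg_get_battery_temp() (illustrative): a raw reading of
+ * 1224 (0.25 K per LSB) gives DIV_ROUND_CLOSEST(1224, 4) = 306 K, hence
+ * (306 - 273) * 10 = 330 deciDegC, i.e. 33.0 degC.
+ */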
+
+static int fg_get_battery_resistance(struct fg_chip *chip, int *val)
+{
+	int rc, esr_uohms, rslow_uohms;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_RSLOW, &rslow_uohms);
+	if (rc < 0) {
+		pr_err("failed to get Rslow, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = esr_uohms + rslow_uohms;
+	return 0;
+}
+
+#define BATT_CURRENT_NUMR	488281
+#define BATT_CURRENT_DENR	1000
+static int fg_get_battery_current(struct fg_chip *chip, int *val)
+{
+	int rc = 0;
+	int64_t temp = 0;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_IBATT_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_IBATT_LSB(chip), rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		temp = buf[0] << 8 | buf[1];
+	else
+		temp = buf[1] << 8 | buf[0];
+
+	pr_debug("buf: %x %x temp: %llx\n", buf[0], buf[1], temp);
+	/* Sign bit is bit 15 */
+	temp = twos_compliment_extend(temp, 15);
+	*val = div_s64((s64)temp * BATT_CURRENT_NUMR, BATT_CURRENT_DENR);
+	return 0;
+}
+
+#define BATT_VOLTAGE_NUMR	122070
+#define BATT_VOLTAGE_DENR	1000
+static int fg_get_battery_voltage(struct fg_chip *chip, int *val)
+{
+	int rc = 0;
+	u16 temp = 0;
+	u8 buf[2];
+
+	rc = fg_read(chip, BATT_INFO_VBATT_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_VBATT_LSB(chip), rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		temp = buf[0] << 8 | buf[1];
+	else
+		temp = buf[1] << 8 | buf[0];
+
+	pr_debug("buf: %x %x temp: %x\n", buf[0], buf[1], temp);
+	*val = div_u64((u64)temp * BATT_VOLTAGE_NUMR, BATT_VOLTAGE_DENR);
+	return 0;
+}
+
+#define MAX_TRIES_SOC		5
+static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
+{
+	u8 cap[2];
+	int rc, tries = 0;
+
+	while (tries < MAX_TRIES_SOC) {
+		rc = fg_read(chip, BATT_SOC_FG_MONOTONIC_SOC(chip), cap, 2);
+		if (rc < 0) {
+			pr_err("failed to read addr=0x%04x, rc=%d\n",
+				BATT_SOC_FG_MONOTONIC_SOC(chip), rc);
+			return rc;
+		}
+
+		if (cap[0] == cap[1])
+			break;
+
+		tries++;
+	}
+
+	if (tries == MAX_TRIES_SOC) {
+		pr_err("shadow registers do not match\n");
+		return -EINVAL;
+	}
+
+	fg_dbg(chip, FG_POWER_SUPPLY, "raw: 0x%02x\n", cap[0]);
+	*val = cap[0];
+	return 0;
+}
+
+#define FULL_CAPACITY	100
+#define FULL_SOC_RAW	255
+static int fg_get_msoc(struct fg_chip *chip, int *msoc)
+{
+	int rc;
+
+	rc = fg_get_msoc_raw(chip, msoc);
+	if (rc < 0)
+		return rc;
+
+	*msoc = DIV_ROUND_CLOSEST(*msoc * FULL_CAPACITY, FULL_SOC_RAW);
+	return 0;
+}
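+
+/*
+ * Worked example for fg_get_msoc() (illustrative): a raw monotonic SOC of
+ * 128 scales to DIV_ROUND_CLOSEST(128 * 100, 255) = 50 percent, and the
+ * full-scale raw value 255 maps to 100 percent.
+ */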
+
+static bool is_batt_empty(struct fg_chip *chip)
+{
+	u8 status;
+	int rc, vbatt_uv, msoc;
+
+	rc = fg_read(chip, BATT_SOC_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_SOC_INT_RT_STS(chip), rc);
+		return false;
+	}
+
+	if (!(status & MSOC_EMPTY_BIT))
+		return false;
+
+	rc = fg_get_battery_voltage(chip, &vbatt_uv);
+	if (rc < 0) {
+		pr_err("failed to get battery voltage, rc=%d\n", rc);
+		return false;
+	}
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (!rc)
+		pr_warn("batt_soc_rt_sts: %x vbatt: %d uV msoc:%d\n", status,
+			vbatt_uv, msoc);
+
+	return ((vbatt_uv < chip->dt.cutoff_volt_mv * 1000) ? true : false);
+}
+
+static int fg_get_debug_batt_id(struct fg_chip *chip, int *batt_id)
+{
+	int rc;
+	u64 temp;
+	u8 buf[2];
+
+	rc = fg_read(chip, ADC_RR_FAKE_BATT_LOW_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			ADC_RR_FAKE_BATT_LOW_LSB(chip), rc);
+		return rc;
+	}
+
+	/*
+	 * Fake battery threshold is encoded in the following format.
+	 * Threshold (code) = (battery_id in Ohms) * 0.00015 * 2^10 / 2.5
+	 */
+	temp = (buf[1] << 8 | buf[0]) * 2500000;
+	do_div(temp, 150 * 1024);
+	batt_id[0] = temp;
+	rc = fg_read(chip, ADC_RR_FAKE_BATT_HIGH_LSB(chip), buf, 2);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			ADC_RR_FAKE_BATT_HIGH_LSB(chip), rc);
+		return rc;
+	}
+
+	temp = (buf[1] << 8 | buf[0]) * 2500000;
+	do_div(temp, 150 * 1024);
+	batt_id[1] = temp;
+	pr_debug("debug batt_id range: [%d %d]\n", batt_id[0], batt_id[1]);
+	return 0;
+}
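+
+/*
+ * Worked example for fg_get_debug_batt_id() (illustrative): a raw threshold
+ * code of 768 decodes to 768 * 2500000 / (150 * 1024) = 12500 ohms, which
+ * matches the forward formula: 12500 * 0.00015 * 1024 / 2.5 = 768.
+ */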
+
+static bool is_debug_batt_id(struct fg_chip *chip)
+{
+	int debug_batt_id[2], rc;
+
+	if (!chip->batt_id_ohms)
+		return false;
+
+	rc = fg_get_debug_batt_id(chip, debug_batt_id);
+	if (rc < 0) {
+		pr_err("Failed to get debug batt_id, rc=%d\n", rc);
+		return false;
+	}
+
+	if (is_between(debug_batt_id[0], debug_batt_id[1],
+		chip->batt_id_ohms)) {
+		fg_dbg(chip, FG_POWER_SUPPLY, "Debug battery id: %dohms\n",
+			chip->batt_id_ohms);
+		return true;
+	}
+
+	return false;
+}
+
+#define DEBUG_BATT_SOC	67
+#define BATT_MISS_SOC	50
+#define EMPTY_SOC	0
+static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
+{
+	int rc, msoc;
+
+	if (is_debug_batt_id(chip)) {
+		*val = DEBUG_BATT_SOC;
+		return 0;
+	}
+
+	if (chip->fg_restarting) {
+		*val = chip->last_soc;
+		return 0;
+	}
+
+	if (chip->battery_missing) {
+		*val = BATT_MISS_SOC;
+		return 0;
+	}
+
+	if (is_batt_empty(chip)) {
+		*val = EMPTY_SOC;
+		return 0;
+	}
+
+	if (chip->charge_full) {
+		*val = FULL_CAPACITY;
+		return 0;
+	}
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (rc < 0)
+		return rc;
+
+	if (chip->delta_soc > 0)
+		*val = chip->maint_soc;
+	else
+		*val = msoc;
+	return 0;
+}
+
+#define DEFAULT_BATT_TYPE	"Unknown Battery"
+#define MISSING_BATT_TYPE	"Missing Battery"
+#define LOADING_BATT_TYPE	"Loading Battery"
+static const char *fg_get_battery_type(struct fg_chip *chip)
+{
+	if (chip->battery_missing)
+		return MISSING_BATT_TYPE;
+
+	if (chip->bp.batt_type_str) {
+		if (chip->profile_loaded)
+			return chip->bp.batt_type_str;
+		else if (chip->profile_available)
+			return LOADING_BATT_TYPE;
+	}
+
+	return DEFAULT_BATT_TYPE;
+}
+
+static int fg_batt_missing_config(struct fg_chip *chip, bool enable)
+{
+	int rc;
+
+	rc = fg_masked_write(chip, BATT_INFO_BATT_MISS_CFG(chip),
+			BM_FROM_BATT_ID_BIT, enable ? BM_FROM_BATT_ID_BIT : 0);
+	if (rc < 0)
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_INFO_BATT_MISS_CFG(chip), rc);
+	return rc;
+}
+
+static int fg_get_batt_id(struct fg_chip *chip)
+{
+	int rc, ret, batt_id = 0;
+
+	if (!chip->batt_id_chan)
+		return -EINVAL;
+
+	rc = fg_batt_missing_config(chip, false);
+	if (rc < 0) {
+		pr_err("Error in disabling BMD, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_read_channel_processed(chip->batt_id_chan, &batt_id);
+	if (rc < 0) {
+		pr_err("Error in reading batt_id channel, rc:%d\n", rc);
+		goto out;
+	}
+
+	/* Wait for 200ms before enabling BMD again */
+	msleep(200);
+
+	fg_dbg(chip, FG_STATUS, "batt_id: %d\n", batt_id);
+	chip->batt_id_ohms = batt_id;
+out:
+	ret = fg_batt_missing_config(chip, true);
+	if (ret < 0) {
+		pr_err("Error in enabling BMD, ret=%d\n", ret);
+		return ret;
+	}
+
+	return rc;
+}
+
+static int fg_get_batt_profile(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	struct device_node *batt_node, *profile_node;
+	const char *data;
+	int rc, len;
+
+	batt_node = of_find_node_by_name(node, "qcom,battery-data");
+	if (!batt_node) {
+		pr_err("Batterydata not available\n");
+		return -ENXIO;
+	}
+
+	profile_node = of_batterydata_get_best_profile(batt_node,
+				chip->batt_id_ohms / 1000, NULL);
+	if (IS_ERR(profile_node))
+		return PTR_ERR(profile_node);
+
+	if (!profile_node) {
+		pr_err("couldn't find profile handle\n");
+		return -ENODATA;
+	}
+
+	rc = of_property_read_string(profile_node, "qcom,battery-type",
+			&chip->bp.batt_type_str);
+	if (rc < 0) {
+		pr_err("battery type unavailable, rc:%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+			&chip->bp.float_volt_uv);
+	if (rc < 0) {
+		pr_err("battery float voltage unavailable, rc:%d\n", rc);
+		chip->bp.float_volt_uv = -EINVAL;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,fastchg-current-ma",
+			&chip->bp.fastchg_curr_ma);
+	if (rc < 0) {
+		pr_err("battery fastchg current unavailable, rc:%d\n", rc);
+		chip->bp.fastchg_curr_ma = -EINVAL;
+	}
+
+	rc = of_property_read_u32(profile_node, "qcom,fg-cc-cv-threshold-mv",
+			&chip->bp.vbatt_full_mv);
+	if (rc < 0) {
+		pr_err("battery cc_cv threshold unavailable, rc:%d\n", rc);
+		chip->bp.vbatt_full_mv = -EINVAL;
+	}
+
+	data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
+	if (!data) {
+		pr_err("No profile data available\n");
+		return -ENODATA;
+	}
+
+	if (len != PROFILE_LEN) {
+		pr_err("battery profile incorrect size: %d\n", len);
+		return -EINVAL;
+	}
+
+	chip->profile_available = true;
+	memcpy(chip->batt_profile, data, len);
+
+	return 0;
+}
+
+static inline void get_temp_setpoint(int threshold, u8 *val)
+{
+	/* Resolution is 0.5C. Base is -30C. */
+	*val = DIV_ROUND_CLOSEST((threshold + 30) * 10, 5);
+}
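+
+/*
+ * Worked example for get_temp_setpoint() (illustrative): a 45 degC threshold
+ * encodes to DIV_ROUND_CLOSEST((45 + 30) * 10, 5) = 150 (0x96).
+ */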
+
+static inline void get_batt_temp_delta(int delta, u8 *val)
+{
+	switch (delta) {
+	case 2:
+		*val = BTEMP_DELTA_2K;
+		break;
+	case 4:
+		*val = BTEMP_DELTA_4K;
+		break;
+	case 6:
+		*val = BTEMP_DELTA_6K;
+		break;
+	case 10:
+		*val = BTEMP_DELTA_10K;
+		break;
+	default:
+		*val = BTEMP_DELTA_2K;
+		break;
+	}
+}
+
+static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
+				int flags)
+{
+	u8 buf[2];
+	int rc, timer_max, timer_init;
+
+	if (charging) {
+		timer_max = FG_SRAM_ESR_TIMER_CHG_MAX;
+		timer_init = FG_SRAM_ESR_TIMER_CHG_INIT;
+	} else {
+		timer_max = FG_SRAM_ESR_TIMER_DISCHG_MAX;
+		timer_init = FG_SRAM_ESR_TIMER_DISCHG_INIT;
+	}
+
+	fg_encode(chip->sp, timer_max, cycles, buf);
+	rc = fg_sram_write(chip,
+			chip->sp[timer_max].addr_word,
+			chip->sp[timer_max].addr_byte, buf,
+			chip->sp[timer_max].len, flags);
+	if (rc < 0) {
+		pr_err("Error in writing esr_timer_dischg_max, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, timer_init, cycles, buf);
+	rc = fg_sram_write(chip,
+			chip->sp[timer_init].addr_word,
+			chip->sp[timer_init].addr_byte, buf,
+			chip->sp[timer_init].len, flags);
+	if (rc < 0) {
+		pr_err("Error in writing esr_timer_dischg_init, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Other functions HERE */
+
+static void fg_notify_charger(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc;
+
+	if (!chip->batt_psy)
+		return;
+
+	if (!chip->profile_available)
+		return;
+
+	prop.intval = chip->bp.float_volt_uv;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
+	if (rc < 0) {
+		pr_err("Error in setting voltage_max property on batt_psy, rc=%d\n",
+			rc);
+		return;
+	}
+
+	prop.intval = chip->bp.fastchg_curr_ma * 1000;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop);
+	if (rc < 0) {
+		pr_err("Error in setting constant_charge_current_max property on batt_psy, rc=%d\n",
+			rc);
+		return;
+	}
+
+	fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
+}
+
+static int fg_awake_cb(struct votable *votable, void *data, int awake,
+			const char *client)
+{
+	struct fg_chip *chip = data;
+
+	if (awake)
+		pm_stay_awake(chip->dev);
+	else
+		pm_relax(chip->dev);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
+static bool batt_psy_initialized(struct fg_chip *chip)
+{
+	if (chip->batt_psy)
+		return true;
+
+	chip->batt_psy = power_supply_get_by_name("battery");
+	if (!chip->batt_psy)
+		return false;
+
+	/* batt_psy is initialized, set the fcc and fv */
+	fg_notify_charger(chip);
+
+	return true;
+}
+
+static bool is_parallel_charger_available(struct fg_chip *chip)
+{
+	if (!chip->parallel_psy)
+		chip->parallel_psy = power_supply_get_by_name("parallel");
+
+	if (!chip->parallel_psy)
+		return false;
+
+	return true;
+}
+
+static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
+{
+	int16_t cc_mah;
+	int rc;
+
+	if (chip->battery_missing || !chip->cl.learned_cc_uah)
+		return -EPERM;
+
+	cc_mah = div64_s64(chip->cl.learned_cc_uah, 1000);
+	/* Write to a backup register to use across reboot */
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ACT_BATT_CAP].addr_word,
+			chip->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, (u8 *)&cc_mah,
+			chip->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing act_batt_cap_bkup, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Write to actual capacity register for coulomb counter operation */
+	rc = fg_sram_write(chip, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET,
+			(u8 *)&cc_mah, chip->sp[FG_SRAM_ACT_BATT_CAP].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing act_batt_cap, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_CAP_LEARN, "learned capacity %llduah/%dmah stored\n",
+		chip->cl.learned_cc_uah, cc_mah);
+	return 0;
+}
+
+#define CAPACITY_DELTA_DECIPCT	500
+static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
+{
+	int rc, act_cap_mah;
+	int64_t delta_cc_uah, pct_nom_cap_uah;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->cl.learned_cc_uah = act_cap_mah * 1000;
+
+	if (chip->cl.learned_cc_uah != chip->cl.nom_cap_uah) {
+		if (chip->cl.learned_cc_uah == 0)
+			chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+
+		delta_cc_uah = abs(chip->cl.learned_cc_uah -
+					chip->cl.nom_cap_uah);
+		pct_nom_cap_uah = div64_s64((int64_t)chip->cl.nom_cap_uah *
+				CAPACITY_DELTA_DECIPCT, 1000);
+		/*
+		 * If the learned capacity is out of range by 50% from the
+		 * nominal capacity, then overwrite the learned capacity with
+		 * the nominal capacity.
+		 */
+		if (chip->cl.nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+			fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is higher than expected, capping it to nominal: %lld\n",
+				chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+			chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+		}
+
+		rc = fg_save_learned_cap_to_sram(chip);
+		if (rc < 0)
+			pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+	}
+
+	fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah:%lld nom_cap_uah: %lld\n",
+		chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+	return 0;
+}
+
+static bool is_temp_valid_cap_learning(struct fg_chip *chip)
+{
+	int rc, batt_temp;
+
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (rc < 0) {
+		pr_err("Error in getting batt_temp\n");
+		return false;
+	}
+
+	if (batt_temp > chip->dt.cl_max_temp ||
+		batt_temp < chip->dt.cl_min_temp) {
+		fg_dbg(chip, FG_CAP_LEARN, "batt temp %d out of range [%d %d]\n",
+			batt_temp, chip->dt.cl_min_temp, chip->dt.cl_max_temp);
+		return false;
+	}
+
+	return true;
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+	int64_t max_inc_val, min_dec_val, old_cap;
+	int rc;
+
+	max_inc_val = chip->cl.learned_cc_uah
+			* (1000 + chip->dt.cl_max_cap_inc);
+	do_div(max_inc_val, 1000);
+
+	min_dec_val = chip->cl.learned_cc_uah
+			* (1000 - chip->dt.cl_max_cap_dec);
+	do_div(min_dec_val, 1000);
+
+	old_cap = chip->cl.learned_cc_uah;
+	if (chip->cl.final_cc_uah > max_inc_val)
+		chip->cl.learned_cc_uah = max_inc_val;
+	else if (chip->cl.final_cc_uah < min_dec_val)
+		chip->cl.learned_cc_uah = min_dec_val;
+	else
+		chip->cl.learned_cc_uah =
+			chip->cl.final_cc_uah;
+
+	if (chip->dt.cl_max_cap_limit) {
+		max_inc_val = (int64_t)chip->cl.nom_cap_uah * (1000 +
+				chip->dt.cl_max_cap_limit);
+		do_div(max_inc_val, 1000);
+		if (chip->cl.final_cc_uah > max_inc_val) {
+			fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes above max limit %lld\n",
+				chip->cl.final_cc_uah, max_inc_val);
+			chip->cl.learned_cc_uah = max_inc_val;
+		}
+	}
+
+	if (chip->dt.cl_min_cap_limit) {
+		min_dec_val = (int64_t)chip->cl.nom_cap_uah * (1000 -
+				chip->dt.cl_min_cap_limit);
+		do_div(min_dec_val, 1000);
+		if (chip->cl.final_cc_uah < min_dec_val) {
+			fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes below min limit %lld\n",
+				chip->cl.final_cc_uah, min_dec_val);
+			chip->cl.learned_cc_uah = min_dec_val;
+		}
+	}
+
+	rc = fg_save_learned_cap_to_sram(chip);
+	if (rc < 0)
+		pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+
+	fg_dbg(chip, FG_CAP_LEARN, "final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+		chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
+}
+
+static int  fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+	int rc, cc_soc_sw, cc_soc_delta_pct;
+	int64_t delta_cc_uah;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+		return rc;
+	}
+
+	cc_soc_delta_pct = DIV_ROUND_CLOSEST(
+				abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+				CC_SOC_30BIT);
+	delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
+				100);
+	chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
+	fg_dbg(chip, FG_CAP_LEARN, "Current cc_soc=%d cc_soc_delta_pct=%d total_cc_uah=%lld\n",
+		cc_soc_sw, cc_soc_delta_pct, chip->cl.final_cc_uah);
+	return 0;
+}
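+
+/*
+ * Worked example for fg_cap_learning_process_full_data() (illustrative,
+ * values hypothetical): with learned_cc_uah = 3000000, init_cc_uah = 900000
+ * and a cc_soc_sw delta of roughly 70% of CC_SOC_30BIT, cc_soc_delta_pct
+ * comes out as 70, delta_cc_uah = 3000000 * 70 / 100 = 2100000, and
+ * final_cc_uah = 900000 + 2100000 = 3000000 uAh.
+ */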
+
+static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+{
+	int rc, cc_soc_sw;
+
+	if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+		chip->dt.cl_start_soc) {
+		fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high!, not starting\n",
+			batt_soc);
+		return -EINVAL;
+	}
+
+	chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+					FULL_SOC_RAW);
+	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+	if (rc < 0) {
+		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->cl.init_cc_soc_sw = cc_soc_sw;
+	chip->cl.active = true;
+	fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
+		batt_soc, chip->cl.init_cc_soc_sw);
+	return 0;
+}
+
+static int fg_cap_learning_done(struct fg_chip *chip)
+{
+	int rc, cc_soc_sw;
+
+	rc = fg_cap_learning_process_full_data(chip);
+	if (rc < 0) {
+		pr_err("Error in processing cap learning full data, rc=%d\n",
+			rc);
+		goto out;
+	}
+
+	/* Write a FULL value to cc_soc_sw */
+	cc_soc_sw = CC_SOC_30BIT;
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+		chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+		chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_cap_learning_post_process(chip);
+out:
+	return rc;
+}
+
+#define FULL_SOC_RAW	255
+static void fg_cap_learning_update(struct fg_chip *chip)
+{
+	int rc, batt_soc;
+
+	mutex_lock(&chip->cl.lock);
+
+	if (!is_temp_valid_cap_learning(chip) || !chip->cl.learned_cc_uah ||
+		chip->battery_missing) {
+		fg_dbg(chip, FG_CAP_LEARN, "Aborting cap_learning %lld\n",
+			chip->cl.learned_cc_uah);
+		chip->cl.active = false;
+		chip->cl.init_cc_uah = 0;
+		goto out;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		goto out;
+	}
+
+	/* We need only the most significant byte here */
+	batt_soc = (u32)batt_soc >> 24;
+
+	fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
+		chip->charge_status, chip->cl.active, batt_soc);
+
+	/* Initialize the starting point of learning capacity */
+	if (!chip->cl.active) {
+		if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+			rc = fg_cap_learning_begin(chip, batt_soc);
+			chip->cl.active = (rc == 0);
+		}
+
+	} else {
+		if (chip->charge_done) {
+			rc = fg_cap_learning_done(chip);
+			if (rc < 0)
+				pr_err("Error in completing capacity learning, rc=%d\n",
+					rc);
+
+			chip->cl.active = false;
+			chip->cl.init_cc_uah = 0;
+		}
+
+		if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
+			fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+				batt_soc);
+			chip->cl.active = false;
+			chip->cl.init_cc_uah = 0;
+		}
+	}
+
+out:
+	mutex_unlock(&chip->cl.lock);
+}
+
+#define KI_COEFF_MED_DISCHG_DEFAULT	1500
+#define KI_COEFF_HI_DISCHG_DEFAULT	2200
+static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip)
+{
+	int rc, i, msoc;
+	int ki_coeff_med = KI_COEFF_MED_DISCHG_DEFAULT;
+	int ki_coeff_hi = KI_COEFF_HI_DISCHG_DEFAULT;
+	u8 val;
+
+	if (!chip->ki_coeff_dischg_en)
+		return 0;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) {
+		for (i = KI_COEFF_SOC_LEVELS - 1; i >= 0; i--) {
+			if (msoc < chip->dt.ki_coeff_soc[i]) {
+				ki_coeff_med = chip->dt.ki_coeff_med_dischg[i];
+				ki_coeff_hi = chip->dt.ki_coeff_hi_dischg[i];
+			}
+		}
+	}
+
+	fg_encode(chip->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med, &val);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val,
+			chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ki_coeff_med, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi, &val);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val,
+			chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_med %d ki_coeff_hi %d\n",
+		ki_coeff_med, ki_coeff_hi);
+	return 0;
+}
+
+static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
+{
+	u8 buf;
+	int rc;
+
+	if (chip->dt.auto_recharge_soc)
+		return 0;
+
+	/* This configuration is available only for pmicobalt v2.0 and above */
+	if (chip->wa_flags & PMI8998_V1_REV_WA)
+		return 0;
+
+	fg_dbg(chip, FG_STATUS, "Setting recharge voltage to %dmV\n",
+		voltage_mv);
+	fg_encode(chip->sp, FG_SRAM_RECHARGE_VBATT_THR, voltage_mv, &buf);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_word,
+			chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_byte,
+			&buf, chip->sp[FG_SRAM_RECHARGE_VBATT_THR].len,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing recharge_vbatt_thr, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+#define AUTO_RECHG_VOLT_LOW_LIMIT_MV	3700
+static int fg_charge_full_update(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc, msoc, bsoc, recharge_soc;
+	u8 full_soc[2] = {0xFF, 0xFF};
+
+	if (!chip->dt.hold_soc_while_full)
+		return 0;
+
+	if (!batt_psy_initialized(chip))
+		return 0;
+
+	mutex_lock(&chip->charge_full_lock);
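+	/*
+	 * Keep the BSOC delta interrupt enabled only while charge termination
+	 * is flagged, so that the drop back to the recharge threshold can be
+	 * detected while the reported SOC is being held at full.
+	 */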
+	if (!chip->charge_done && chip->bsoc_delta_irq_en) {
+		disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+		disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
+		chip->bsoc_delta_irq_en = false;
+	} else if (chip->charge_done && !chip->bsoc_delta_irq_en) {
+		enable_irq(fg_irqs[BSOC_DELTA_IRQ].irq);
+		enable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+		chip->bsoc_delta_irq_en = true;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+		&prop);
+	if (rc < 0) {
+		pr_err("Error in getting battery health, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->health = prop.intval;
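+	/*
+	 * Convert the recharge threshold from a percentage to the raw
+	 * 0-255 scale used by BATT_SOC.
+	 */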
+	recharge_soc = chip->dt.recharge_soc_thr;
+	recharge_soc = DIV_ROUND_CLOSEST(recharge_soc * FULL_SOC_RAW,
+				FULL_CAPACITY);
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
+	if (rc < 0) {
+		pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+		goto out;
+	}
+
+	/* We need the 2 most significant bytes here */
+	bsoc = (u32)bsoc >> 16;
+	rc = fg_get_msoc(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting msoc, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
+		msoc, bsoc, chip->health, chip->charge_status,
+		chip->charge_full);
+	if (chip->charge_done && !chip->charge_full) {
+		if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+			fg_dbg(chip, FG_STATUS, "Setting charge_full to true\n");
+			chip->charge_full = true;
+			/*
+			 * Lower the recharge voltage so that VBAT_LT_RECHG
+			 * signal will not be asserted soon.
+			 */
+			rc = fg_set_recharge_voltage(chip,
+					AUTO_RECHG_VOLT_LOW_LIMIT_MV);
+			if (rc < 0) {
+				pr_err("Error in reducing recharge voltage, rc=%d\n",
+					rc);
+				goto out;
+			}
+		} else {
+			fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
+				msoc);
+		}
+	} else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+		chip->delta_soc = FULL_CAPACITY - msoc;
+
+		/*
+		 * Spread the delta SOC out over every 10% change in monotonic
+		 * SOC. At most 9% can be spread across the 0-100 range,
+		 * skipping the first 10%.
+		 */
+		if (chip->delta_soc > 9) {
+			chip->delta_soc = 0;
+			chip->maint_soc = 0;
+		} else {
+			chip->maint_soc = FULL_CAPACITY;
+			chip->last_msoc = msoc;
+		}
+
+		chip->charge_full = false;
+
+		/*
+		 * Raise the recharge voltage so that VBAT_LT_RECHG signal
+		 * will be asserted soon, since the battery SOC has dropped
+		 * below the recharge SOC threshold.
+		 */
+		rc = fg_set_recharge_voltage(chip,
+					chip->dt.recharge_volt_thr_mv);
+		if (rc < 0) {
+			pr_err("Error in setting recharge voltage, rc=%d\n",
+				rc);
+			goto out;
+		}
+		fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
+			bsoc >> 8, recharge_soc, chip->delta_soc);
+	} else {
+		goto out;
+	}
+
+	if (!chip->charge_full)
+		goto out;
+
+	/*
+	 * During JEITA conditions, charge_full can happen early. FULL_SOC
+	 * and MONOTONIC_SOC needs to be updated to reflect the same. Write
+	 * battery SOC to FULL_SOC and write a full value to MONOTONIC_SOC.
+	 */
+	rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET, (u8 *)&bsoc, 2,
+			FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("failed to write full_soc rc=%d\n", rc);
+		goto out;
+	}
+
+	rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
+			full_soc, 2, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("failed to write monotonic_soc rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "Set charge_full to true @ soc %d\n", msoc);
+out:
+	mutex_unlock(&chip->charge_full_lock);
+	return rc;
+}
+
+#define RCONN_CONFIG_BIT	BIT(0)
+static int fg_rconn_config(struct fg_chip *chip)
+{
+	int rc, esr_uohms;
+	u64 scaling_factor;
+	u32 val = 0;
+
+	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+			SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading SW_CONFIG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (val & RCONN_CONFIG_BIT) {
+		fg_dbg(chip, FG_STATUS, "Rconn already configured: %x\n", val);
+		return 0;
+	}
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
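+	/*
+	 * Compute a per-mille factor esr / (esr + Rconn) and use it to scale
+	 * down the rslow charge/discharge thresholds so that they account
+	 * for the board/connector resistance.
+	 */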
+	scaling_factor = div64_u64((u64)esr_uohms * 1000,
+				esr_uohms + (chip->dt.rconn_mohms * 1000));
+
+	rc = fg_sram_read(chip, ESR_RSLOW_CHG_WORD,
+			ESR_RSLOW_CHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	val *= scaling_factor;
+	do_div(val, 1000);
+	rc = fg_sram_write(chip, ESR_RSLOW_CHG_WORD,
+			ESR_RSLOW_CHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR_RSLOW_CHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "esr_rslow_chg modified to %x\n", val & 0xFF);
+
+	rc = fg_sram_read(chip, ESR_RSLOW_DISCHG_WORD,
+			ESR_RSLOW_DISCHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	val *= scaling_factor;
+	do_div(val, 1000);
+	rc = fg_sram_write(chip, ESR_RSLOW_DISCHG_WORD,
+			ESR_RSLOW_DISCHG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR_RSLOW_DISCHG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_STATUS, "esr_rslow_dischg modified to %x\n",
+		val & 0xFF);
+
+	val = RCONN_CONFIG_BIT;
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing SW_CONFIG_OFFSET, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_set_constant_chg_voltage(struct fg_chip *chip, int volt_uv)
+{
+	u8 buf[2];
+	int rc;
+
+	if (volt_uv <= 0 || volt_uv > 15590000) {
+		pr_err("Invalid voltage %d\n", volt_uv);
+		return -EINVAL;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_VBATT_FULL, volt_uv, buf);
+
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_FULL].addr_word,
+		chip->sp[FG_SRAM_VBATT_FULL].addr_byte, buf,
+		chip->sp[FG_SRAM_VBATT_FULL].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing vbatt_full, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
+{
+	u8 buf;
+	int rc;
+
+	if (!chip->dt.auto_recharge_soc)
+		return 0;
+
+	fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, &buf);
+	rc = fg_sram_write(chip,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte, &buf,
+			chip->sp[FG_SRAM_RECHARGE_SOC_THR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing recharge_soc_thr, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_adjust_recharge_soc(struct fg_chip *chip)
+{
+	int rc, msoc, recharge_soc, new_recharge_soc = 0;
+
+	if (!chip->dt.auto_recharge_soc)
+		return 0;
+
+	recharge_soc = chip->dt.recharge_soc_thr;
+	/*
+	 * If the input is present and charging has been terminated, adjust
+	 * the recharge SOC threshold based on the monotonic SOC at which
+	 * charge termination happened.
+	 */
+	if (is_input_present(chip) && !chip->recharge_soc_adjusted
+		&& chip->charge_done) {
+		/* Get raw monotonic SOC for calculation */
+		rc = fg_get_msoc(chip, &msoc);
+		if (rc < 0) {
+			pr_err("Error in getting msoc, rc=%d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Shift the recharge_soc threshold so that the nominal margin
+		 * below full is maintained relative to the SOC at which
+		 * charging actually terminated.
+		 */
+		new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
+	} else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
+				|| chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+		/* Restore the default value */
+		new_recharge_soc = recharge_soc;
+	}
+
+	if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
+		rc = fg_set_recharge_soc(chip, new_recharge_soc);
+		if (rc) {
+			pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+			return rc;
+		}
+
+		chip->recharge_soc_adjusted = (new_recharge_soc !=
+						recharge_soc);
+		fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
+			new_recharge_soc);
+	}
+
+	return 0;
+}
+
+static int fg_slope_limit_config(struct fg_chip *chip, int batt_temp)
+{
+	enum slope_limit_status status;
+	int rc;
+	u8 buf;
+
+	if (!chip->slope_limit_en)
+		return 0;
+
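+	/*
+	 * Pick one of the four slope-limit coefficients based on the
+	 * charge/discharge state and whether the battery temperature is
+	 * below the DT threshold; rewrite it only when the state changes.
+	 */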
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING ||
+		chip->charge_status == POWER_SUPPLY_STATUS_FULL) {
+		if (batt_temp < chip->dt.slope_limit_temp)
+			status = LOW_TEMP_CHARGE;
+		else
+			status = HIGH_TEMP_CHARGE;
+	} else {
+		if (batt_temp < chip->dt.slope_limit_temp)
+			status = LOW_TEMP_DISCHARGE;
+		else
+			status = HIGH_TEMP_DISCHARGE;
+	}
+
+	if (chip->slope_limit_sts == status)
+		return 0;
+
+	fg_encode(chip->sp, FG_SRAM_SLOPE_LIMIT,
+		chip->dt.slope_limit_coeffs[status], &buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_SLOPE_LIMIT].addr_word,
+			chip->sp[FG_SRAM_SLOPE_LIMIT].addr_byte, &buf,
+			chip->sp[FG_SRAM_SLOPE_LIMIT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in configuring slope_limit coefficient, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	chip->slope_limit_sts = status;
+	fg_dbg(chip, FG_STATUS, "Slope limit status: %d value: %x\n", status,
+		buf);
+	return 0;
+}
+
+static int fg_esr_filter_config(struct fg_chip *chip, int batt_temp)
+{
+	u8 esr_tight_lt_flt, esr_broad_lt_flt;
+	bool cold_temp = false;
+	int rc;
+
+	/*
+	 * If the battery temperature is below -21 C (batt_temp is in
+	 * deci-degrees Celsius), skip modifying the ESR filter.
+	 */
+	if (batt_temp < -210)
+		return 0;
+
+	/*
+	 * If the battery temperature is at or below the switch temperature
+	 * (10 C by default), apply the low-temperature tight and broad filter
+	 * values to the room-temperature ESR filter registers. Once the
+	 * temperature rises back above the switch point, restore the room
+	 * temperature filter coefficients.
+	 */
+	if (batt_temp > chip->dt.esr_flt_switch_temp
+		&& chip->esr_flt_cold_temp_en) {
+		fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
+			chip->dt.esr_tight_flt_upct, &esr_tight_lt_flt);
+		fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
+			chip->dt.esr_broad_flt_upct, &esr_broad_lt_flt);
+	} else if (batt_temp <= chip->dt.esr_flt_switch_temp
+			&& !chip->esr_flt_cold_temp_en) {
+		fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
+			chip->dt.esr_tight_lt_flt_upct, &esr_tight_lt_flt);
+		fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
+			chip->dt.esr_broad_lt_flt_upct, &esr_broad_lt_flt);
+		cold_temp = true;
+	} else {
+		return 0;
+	}
+
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_byte,
+			&esr_tight_lt_flt,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR LT tight filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_byte,
+			&esr_broad_lt_flt,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR LT broad filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->esr_flt_cold_temp_en = cold_temp;
+	fg_dbg(chip, FG_STATUS, "applied %s ESR filter values\n",
+		cold_temp ? "cold" : "normal");
+	return 0;
+}
+
+static int fg_esr_fcc_config(struct fg_chip *chip)
+{
+	union power_supply_propval prop = {0, };
+	int rc;
+	bool parallel_en = false;
+
+	if (is_parallel_charger_available(chip)) {
+		rc = power_supply_get_property(chip->parallel_psy,
+			POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+		if (rc < 0) {
+			pr_err("Error in reading charging_enabled from parallel_psy, rc=%d\n",
+				rc);
+			return rc;
+		}
+		parallel_en = prop.intval;
+	}
+
+	fg_dbg(chip, FG_POWER_SUPPLY, "charge_status: %d parallel_en: %d esr_fcc_ctrl_en: %d\n",
+		chip->charge_status, parallel_en, chip->esr_fcc_ctrl_en);
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
+								parallel_en) {
+		if (chip->esr_fcc_ctrl_en)
+			return 0;
+
+		/*
+		 * When parallel charging is enabled, configure ESR FCC to
+		 * 300mA to trigger an ESR pulse. Without this, FG can ask
+		 * the main charger to increase FCC when it is supposed to
+		 * decrease it.
+		 */
+		rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+				ESR_FAST_CRG_IVAL_MASK |
+				ESR_FAST_CRG_CTL_EN_BIT,
+				ESR_FCC_300MA | ESR_FAST_CRG_CTL_EN_BIT);
+		if (rc < 0) {
+			pr_err("Error in writing to %04x, rc=%d\n",
+				BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+			return rc;
+		}
+
+		chip->esr_fcc_ctrl_en = true;
+	} else {
+		if (!chip->esr_fcc_ctrl_en)
+			return 0;
+
+		/*
+		 * If we're here, then it means either the device is not in
+		 * charging state or parallel charging is disabled. Disable
+		 * ESR fast charge current control in SW.
+		 */
+		rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+				ESR_FAST_CRG_CTL_EN_BIT, 0);
+		if (rc < 0) {
+			pr_err("Error in writing to %04x, rc=%d\n",
+				BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+			return rc;
+		}
+
+		chip->esr_fcc_ctrl_en = false;
+	}
+
+	fg_dbg(chip, FG_STATUS, "esr_fcc_ctrl_en set to %d\n",
+		chip->esr_fcc_ctrl_en);
+	return 0;
+}
+
+static void fg_batt_avg_update(struct fg_chip *chip)
+{
+	if (chip->charge_status == chip->prev_charge_status)
+		return;
+
+	cancel_delayed_work_sync(&chip->batt_avg_work);
+	fg_circ_buf_clr(&chip->ibatt_circ_buf);
+	fg_circ_buf_clr(&chip->vbatt_circ_buf);
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING ||
+			chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
+		schedule_delayed_work(&chip->batt_avg_work,
+							msecs_to_jiffies(2000));
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+			struct fg_chip, status_change_work);
+	union power_supply_propval prop = {0, };
+	int rc, batt_temp;
+
+	if (!batt_psy_initialized(chip)) {
+		fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
+		goto out;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+			&prop);
+	if (rc < 0) {
+		pr_err("Error in getting charging status, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->prev_charge_status = chip->charge_status;
+	chip->charge_status = prop.intval;
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+	if (rc < 0) {
+		pr_err("Error in getting charge type, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->charge_type = prop.intval;
+	rc = power_supply_get_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+	if (rc < 0) {
+		pr_err("Error in getting charge_done, rc=%d\n", rc);
+		goto out;
+	}
+
+	chip->charge_done = prop.intval;
+	if (chip->cyc_ctr.en)
+		schedule_work(&chip->cycle_count_work);
+
+	fg_cap_learning_update(chip);
+
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	rc = fg_adjust_recharge_soc(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
+
+	rc = fg_adjust_ki_coeff_dischg(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+	rc = fg_esr_fcc_config(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
+
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (!rc) {
+		rc = fg_slope_limit_config(chip, batt_temp);
+		if (rc < 0)
+			pr_err("Error in configuring slope limiter rc:%d\n",
+				rc);
+	}
+
+	fg_batt_avg_update(chip);
+
+out:
+	fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
+		chip->charge_status, chip->charge_type, chip->charge_done);
+	pm_relax(chip->dev);
+}
+
+static void restore_cycle_counter(struct fg_chip *chip)
+{
+	int rc = 0, i;
+	u8 data[2];
+
+	mutex_lock(&chip->cyc_ctr.lock);
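+	/*
+	 * Each 16-bit bucket count is stored little-endian, packed two
+	 * buckets per SRAM word.
+	 */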
+	for (i = 0; i < BUCKET_COUNT; i++) {
+		rc = fg_sram_read(chip, CYCLE_COUNT_WORD + (i / 2),
+				CYCLE_COUNT_OFFSET + (i % 2) * 2, data, 2,
+				FG_IMA_DEFAULT);
+		if (rc < 0)
+			pr_err("failed to read bucket %d rc=%d\n", i, rc);
+		else
+			chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+	}
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static void clear_cycle_counter(struct fg_chip *chip)
+{
+	int rc = 0, i;
+
+	if (!chip->cyc_ctr.en)
+		return;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	memset(chip->cyc_ctr.count, 0, sizeof(chip->cyc_ctr.count));
+	for (i = 0; i < BUCKET_COUNT; i++) {
+		chip->cyc_ctr.started[i] = false;
+		chip->cyc_ctr.last_soc[i] = 0;
+	}
+	rc = fg_sram_write(chip, CYCLE_COUNT_WORD, CYCLE_COUNT_OFFSET,
+			(u8 *)&chip->cyc_ctr.count,
+			sizeof(chip->cyc_ctr.count) / sizeof(u8 *),
+			FG_IMA_DEFAULT);
+	if (rc < 0)
+		pr_err("failed to clear cycle counter rc=%d\n", rc);
+
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
+{
+	int rc = 0;
+	u16 cyc_count;
+	u8 data[2];
+
+	if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
+		return 0;
+
+	cyc_count = chip->cyc_ctr.count[bucket];
+	cyc_count++;
+	data[0] = cyc_count & 0xFF;
+	data[1] = cyc_count >> 8;
+
+	rc = fg_sram_write(chip, CYCLE_COUNT_WORD + (bucket / 2),
+			CYCLE_COUNT_OFFSET + (bucket % 2) * 2, data, 2,
+			FG_IMA_DEFAULT);
+	if (rc < 0)
+		pr_err("failed to write BATT_CYCLE[%d] rc=%d\n",
+			bucket, rc);
+	else
+		chip->cyc_ctr.count[bucket] = cyc_count;
+	return rc;
+}
+
+static void cycle_count_work(struct work_struct *work)
+{
+	int rc = 0, bucket, i, batt_soc;
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				cycle_count_work);
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+	if (rc < 0) {
+		pr_err("Failed to read battery soc rc: %d\n", rc);
+		goto out;
+	}
+
+	/* We need only the most significant byte here */
+	batt_soc = (u32)batt_soc >> 24;
+
+	if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+		/* Find out which bucket the SOC falls in */
+		bucket = batt_soc / BUCKET_SOC_PCT;
+		pr_debug("batt_soc: %d bucket: %d\n", batt_soc, bucket);
+
+		/*
+		 * If counting had started for the previous bucket, store its
+		 * counter now that counting for the current bucket is
+		 * starting.
+		 */
+		if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
+			!chip->cyc_ctr.started[bucket]) {
+			rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
+			if (rc < 0) {
+				pr_err("Error in storing cycle_ctr rc: %d\n",
+					rc);
+				goto out;
+			} else {
+				chip->cyc_ctr.started[bucket - 1] = false;
+				chip->cyc_ctr.last_soc[bucket - 1] = 0;
+			}
+		}
+		if (!chip->cyc_ctr.started[bucket]) {
+			chip->cyc_ctr.started[bucket] = true;
+			chip->cyc_ctr.last_soc[bucket] = batt_soc;
+		}
+	} else {
+		for (i = 0; i < BUCKET_COUNT; i++) {
+			if (chip->cyc_ctr.started[i] &&
+				batt_soc > chip->cyc_ctr.last_soc[i]) {
+				rc = fg_inc_store_cycle_ctr(chip, i);
+				if (rc < 0)
+					pr_err("Error in storing cycle_ctr rc: %d\n",
+						rc);
+				chip->cyc_ctr.last_soc[i] = 0;
+			}
+			chip->cyc_ctr.started[i] = false;
+		}
+	}
+out:
+	mutex_unlock(&chip->cyc_ctr.lock);
+}
+
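+/*
+ * Return the stored cycle count for the bucket previously selected via
+ * POWER_SUPPLY_PROP_CYCLE_COUNT_ID (chip->cyc_ctr.id, 1-based).
+ */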
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+	int count;
+
+	if (!chip->cyc_ctr.en)
+		return 0;
+
+	if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
+		return -EINVAL;
+
+	mutex_lock(&chip->cyc_ctr.lock);
+	count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
+	mutex_unlock(&chip->cyc_ctr.lock);
+	return count;
+}
+
+#define PROFILE_LOAD_BIT	BIT(0)
+#define BOOTLOADER_LOAD_BIT	BIT(1)
+#define BOOTLOADER_RESTART_BIT	BIT(2)
+#define HLOS_RESTART_BIT	BIT(3)
+static bool is_profile_load_required(struct fg_chip *chip)
+{
+	u8 buf[PROFILE_COMP_LEN], val;
+	bool profiles_same = false;
+	int rc;
+
+	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to read profile integrity rc=%d\n", rc);
+		return false;
+	}
+
+	/* Check if integrity bit is set */
+	if (val & PROFILE_LOAD_BIT) {
+		fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
+		rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+				buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in reading battery profile, rc:%d\n", rc);
+			return false;
+		}
+		profiles_same = memcmp(chip->batt_profile, buf,
+					PROFILE_COMP_LEN) == 0;
+		if (profiles_same) {
+			fg_dbg(chip, FG_STATUS, "Battery profile is same, not loading it\n");
+			return false;
+		}
+
+		if (!chip->dt.force_load_profile) {
+			pr_warn("Profiles doesn't match, skipping loading it since force_load_profile is disabled\n");
+			if (fg_profile_dump) {
+				pr_info("FG: loaded profile:\n");
+				dump_sram(buf, PROFILE_LOAD_WORD,
+					PROFILE_COMP_LEN);
+				pr_info("FG: available profile:\n");
+				dump_sram(chip->batt_profile, PROFILE_LOAD_WORD,
+					PROFILE_LEN);
+			}
+			return false;
+		}
+
+		fg_dbg(chip, FG_STATUS, "Profiles are different, loading the correct one\n");
+	} else {
+		fg_dbg(chip, FG_STATUS, "Profile integrity bit is not set\n");
+		if (fg_profile_dump) {
+			pr_info("FG: profile to be loaded:\n");
+			dump_sram(chip->batt_profile, PROFILE_LOAD_WORD,
+				PROFILE_LEN);
+		}
+	}
+	return true;
+}
+
+static void clear_battery_profile(struct fg_chip *chip)
+{
+	u8 val = 0;
+	int rc;
+
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0)
+		pr_err("failed to write profile integrity rc=%d\n", rc);
+}
+
+#define SOC_READY_WAIT_MS		2000
+static int __fg_restart(struct fg_chip *chip)
+{
+	int rc, msoc;
+	bool tried_again = false;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->last_soc = msoc;
+	chip->fg_restarting = true;
+	reinit_completion(&chip->soc_ready);
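+	/*
+	 * Trigger an FG restart and wait for the first SOC estimate; the
+	 * soc-ready interrupt completes soc_ready once it is available.
+	 */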
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT,
+			RESTART_GO_BIT);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+
+wait:
+	rc = wait_for_completion_interruptible_timeout(&chip->soc_ready,
+		msecs_to_jiffies(SOC_READY_WAIT_MS));
+
+	/* If we were interrupted, wait one more time. */
+	if (rc == -ERESTARTSYS && !tried_again) {
+		tried_again = true;
+		goto wait;
+	} else if (rc <= 0) {
+		pr_err("wait for soc_ready timed out rc=%d\n", rc);
+	}
+
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+out:
+	chip->fg_restarting = false;
+	return rc;
+}
+
+static void profile_load_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work,
+				struct fg_chip,
+				profile_load_work.work);
+	u8 buf[2], val;
+	int rc;
+
+	vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+	if (!is_profile_load_required(chip))
+		goto done;
+
+	clear_cycle_counter(chip);
+	mutex_lock(&chip->cl.lock);
+	chip->cl.learned_cc_uah = 0;
+	chip->cl.active = false;
+	mutex_unlock(&chip->cl.lock);
+
+	fg_dbg(chip, FG_STATUS, "profile loading started\n");
+	rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+	if (rc < 0) {
+		pr_err("Error in writing to %04x, rc=%d\n",
+			BATT_SOC_RESTART(chip), rc);
+		goto out;
+	}
+
+	/* load battery profile */
+	rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+			chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+	if (rc < 0) {
+		pr_err("Error in writing battery profile, rc:%d\n", rc);
+		goto out;
+	}
+
+	rc = __fg_restart(chip);
+	if (rc < 0) {
+		pr_err("Error in restarting FG, rc=%d\n", rc);
+		goto out;
+	}
+
+	fg_dbg(chip, FG_STATUS, "SOC is ready\n");
+
+	/* Set the profile integrity bit */
+	val = HLOS_RESTART_BIT | PROFILE_LOAD_BIT;
+	rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
+			PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("failed to write profile integrity rc=%d\n", rc);
+		goto out;
+	}
+
+done:
+	rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
+			FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
+			NOM_CAP_OFFSET, rc);
+	} else {
+		chip->cl.nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
+		rc = fg_load_learned_cap_from_sram(chip);
+		if (rc < 0)
+			pr_err("Error in loading capacity learning data, rc:%d\n",
+				rc);
+	}
+
+	batt_psy_initialized(chip);
+	fg_notify_charger(chip);
+	chip->profile_loaded = true;
+	chip->soc_reporting_ready = true;
+	fg_dbg(chip, FG_STATUS, "profile loaded successfully");
+out:
+	vote(chip->awake_votable, PROFILE_LOAD, false, 0);
+}
+
+static void sram_dump_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+					    sram_dump_work.work);
+	u8 buf[FG_SRAM_LEN];
+	int rc;
+	s64 timestamp_ms, quotient;
+	s32 remainder;
+
+	rc = fg_sram_read(chip, 0, 0, buf, FG_SRAM_LEN, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in reading FG SRAM, rc:%d\n", rc);
+		goto resched;
+	}
+
+	timestamp_ms = ktime_to_ms(ktime_get_boottime());
+	quotient = div_s64_rem(timestamp_ms, 1000, &remainder);
+	fg_dbg(chip, FG_STATUS, "SRAM Dump Started at %lld.%d\n",
+		quotient, remainder);
+	dump_sram(buf, 0, FG_SRAM_LEN);
+	timestamp_ms = ktime_to_ms(ktime_get_boottime());
+	quotient = div_s64_rem(timestamp_ms, 1000, &remainder);
+	fg_dbg(chip, FG_STATUS, "SRAM Dump done at %lld.%d\n",
+		quotient, remainder);
+resched:
+	schedule_delayed_work(&chip->sram_dump_work,
+			msecs_to_jiffies(fg_sram_dump_period_ms));
+}
+
+static int fg_sram_dump_sysfs(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+	struct fg_chip *chip;
+	bool old_val = fg_sram_dump;
+
+	rc = param_set_bool(val, kp);
+	if (rc) {
+		pr_err("Unable to set fg_sram_dump: %d\n", rc);
+		return rc;
+	}
+
+	if (fg_sram_dump == old_val)
+		return 0;
+
+	bms_psy = power_supply_get_by_name("bms");
+	if (!bms_psy) {
+		pr_err("bms psy not found\n");
+		return -ENODEV;
+	}
+
+	chip = power_supply_get_drvdata(bms_psy);
+	if (fg_sram_dump)
+		schedule_delayed_work(&chip->sram_dump_work,
+				msecs_to_jiffies(fg_sram_dump_period_ms));
+	else
+		cancel_delayed_work_sync(&chip->sram_dump_work);
+
+	return 0;
+}
+
+static struct kernel_param_ops fg_sram_dump_ops = {
+	.set = fg_sram_dump_sysfs,
+	.get = param_get_bool,
+};
+
+module_param_cb(sram_dump_en, &fg_sram_dump_ops, &fg_sram_dump, 0644);
+
+static int fg_restart_sysfs(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *bms_psy;
+	struct fg_chip *chip;
+
+	rc = param_set_int(val, kp);
+	if (rc) {
+		pr_err("Unable to set fg_restart: %d\n", rc);
+		return rc;
+	}
+
+	if (fg_restart != 1) {
+		pr_err("Bad value %d\n", fg_restart);
+		return -EINVAL;
+	}
+
+	bms_psy = power_supply_get_by_name("bms");
+	if (!bms_psy) {
+		pr_err("bms psy not found\n");
+		return 0;
+	}
+
+	chip = power_supply_get_drvdata(bms_psy);
+	rc = __fg_restart(chip);
+	if (rc < 0) {
+		pr_err("Error in restarting FG, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_info("FG restart done\n");
+	return rc;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+	.set = fg_restart_sysfs,
+	.get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
+#define BATT_AVG_POLL_PERIOD_MS	10000
+static void batt_avg_work(struct work_struct *work)
+{
+	struct fg_chip *chip = container_of(work, struct fg_chip,
+					    batt_avg_work.work);
+	int rc, ibatt_now, vbatt_now;
+
+	mutex_lock(&chip->batt_avg_lock);
+	rc = fg_get_battery_current(chip, &ibatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery current, rc=%d\n", rc);
+		goto reschedule;
+	}
+
+	rc = fg_get_battery_voltage(chip, &vbatt_now);
+	if (rc < 0) {
+		pr_err("failed to get battery voltage, rc=%d\n", rc);
+		goto reschedule;
+	}
+
+	fg_circ_buf_add(&chip->ibatt_circ_buf, ibatt_now);
+	fg_circ_buf_add(&chip->vbatt_circ_buf, vbatt_now);
+
+reschedule:
+	mutex_unlock(&chip->batt_avg_lock);
+	schedule_delayed_work(&chip->batt_avg_work,
+			      msecs_to_jiffies(BATT_AVG_POLL_PERIOD_MS));
+}
+
+#define HOURS_TO_SECONDS	3600
+#define OCV_SLOPE_UV		10869
+#define MILLI_UNIT		1000
+#define MICRO_UNIT		1000000
+static int fg_get_time_to_full(struct fg_chip *chip, int *val)
+{
+	int rc, ibatt_avg, vbatt_avg, rbatt, msoc, ocv_cc2cv, full_soc,
+		act_cap_uah;
+	s32 i_cc2cv, soc_cc2cv, ln_val, centi_tau_scale;
+	s64 t_predicted_cc = 0, t_predicted_cv = 0;
+
+	if (chip->bp.float_volt_uv <= 0) {
+		pr_err("battery profile is not loaded\n");
+		return -ENODATA;
+	}
+
+	if (!batt_psy_initialized(chip)) {
+		fg_dbg(chip, FG_TTF, "charger is not available\n");
+		return -ENODATA;
+	}
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("failed to get msoc rc=%d\n", rc);
+		return rc;
+	}
+	fg_dbg(chip, FG_TTF, "msoc=%d\n", msoc);
+
+	if (msoc >= 100) {
+		*val = 0;
+		return 0;
+	}
+
+	mutex_lock(&chip->batt_avg_lock);
+	rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous current */
+		rc = fg_get_battery_current(chip, &ibatt_avg);
+		if (rc < 0) {
+			mutex_unlock(&chip->batt_avg_lock);
+			pr_err("failed to get battery current, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	rc = fg_circ_buf_avg(&chip->vbatt_circ_buf, &vbatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous voltage */
+		rc = fg_get_battery_voltage(chip, &vbatt_avg);
+		if (rc < 0) {
+			mutex_unlock(&chip->batt_avg_lock);
+			pr_err("failed to get battery voltage, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	mutex_unlock(&chip->batt_avg_lock);
+	fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg);
+
+	/* clamp ibatt_avg to -150mA */
+	if (ibatt_avg > -150000)
+		ibatt_avg = -150000;
+	fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg);
+
+	/* reverse polarity to be consistent with unsigned current settings */
+	ibatt_avg = abs(ibatt_avg);
+
+	/* estimated battery current at the CC to CV transition */
+	i_cc2cv = div_s64((s64)ibatt_avg * vbatt_avg, chip->bp.float_volt_uv);
+	fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv);
+
+	rc = fg_get_battery_resistance(chip, &rbatt);
+	if (rc < 0) {
+		pr_err("failed to get battery resistance rc=%d\n", rc);
+		return rc;
+	}
+
+	/* clamp rbatt to 50mOhms */
+	if (rbatt < 50000)
+		rbatt = 50000;
+
+	fg_dbg(chip, FG_TTF, "rbatt=%d\n", rbatt);
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
+	if (rc < 0) {
+		pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc);
+		return rc;
+	}
+	act_cap_uah *= MILLI_UNIT;
+	fg_dbg(chip, FG_TTF, "actual_capacity_uah=%d\n", act_cap_uah);
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
+	if (rc < 0) {
+		pr_err("failed to get full soc rc=%d\n", rc);
+		return rc;
+	}
+	full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
+								FULL_SOC_RAW);
+	fg_dbg(chip, FG_TTF, "full_soc=%d\n", full_soc);
+
+	/* if we are already in CV state then we can skip estimating CC */
+	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		goto skip_cc_estimate;
+
+	/* if the charger is current limited then use power approximation */
+	if (ibatt_avg > chip->bp.fastchg_curr_ma * MILLI_UNIT - 50000)
+		ocv_cc2cv = div_s64((s64)rbatt * ibatt_avg, MICRO_UNIT);
+	else
+		ocv_cc2cv = div_s64((s64)rbatt * i_cc2cv, MICRO_UNIT);
+	ocv_cc2cv = chip->bp.float_volt_uv - ocv_cc2cv;
+	fg_dbg(chip, FG_TTF, "ocv_cc2cv=%d\n", ocv_cc2cv);
+
+	soc_cc2cv = div_s64(chip->bp.float_volt_uv - ocv_cc2cv, OCV_SLOPE_UV);
+	/* estimated SOC at the CC to CV transition */
+	soc_cc2cv = 100 - soc_cc2cv;
+	fg_dbg(chip, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv);
+
+	/* the estimated SOC may be lower than the current SOC */
+	if (soc_cc2cv - msoc <= 0)
+		goto skip_cc_estimate;
+
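+	/*
+	 * CC phase time in seconds: charge needed to go from msoc to the
+	 * estimated CC->CV SOC, divided by the average of the present and
+	 * CC->CV currents.
+	 */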
+	t_predicted_cc = div_s64((s64)full_soc * act_cap_uah, 100);
+	t_predicted_cc = div_s64(t_predicted_cc * (soc_cc2cv - msoc), 100);
+	t_predicted_cc *= HOURS_TO_SECONDS;
+	t_predicted_cc = div_s64(t_predicted_cc, (ibatt_avg + i_cc2cv) / 2);
+
+skip_cc_estimate:
+	fg_dbg(chip, FG_TTF, "t_predicted_cc=%lld\n", t_predicted_cc);
+
+	/* CV estimate starts here */
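+	/*
+	 * The CV phase is modeled as an exponential (RC-like) decay: roughly
+	 * tau * ln(i_start / i_term), where tau is derived from the battery
+	 * capacity and resistance and scaled down as msoc approaches full.
+	 * fg_lerp() over fg_ln_table provides the ln() approximation.
+	 */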
+	if (chip->charge_type >= POWER_SUPPLY_CHARGE_TYPE_TAPER)
+		ln_val = ibatt_avg / (abs(chip->dt.sys_term_curr_ma) + 200);
+	else
+		ln_val = i_cc2cv / (abs(chip->dt.sys_term_curr_ma) + 200);
+
+	if (msoc < 95)
+		centi_tau_scale = 100;
+	else
+		centi_tau_scale = 20 * (100 - msoc);
+
+	fg_dbg(chip, FG_TTF, "ln_in=%d\n", ln_val);
+	rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), ln_val, &ln_val);
+	fg_dbg(chip, FG_TTF, "ln_out=%d\n", ln_val);
+	t_predicted_cv = div_s64((s64)act_cap_uah * rbatt, MICRO_UNIT);
+	t_predicted_cv = div_s64(t_predicted_cv * centi_tau_scale, 100);
+	t_predicted_cv = div_s64(t_predicted_cv * ln_val, MILLI_UNIT);
+	t_predicted_cv = div_s64(t_predicted_cv * HOURS_TO_SECONDS, MICRO_UNIT);
+	fg_dbg(chip, FG_TTF, "t_predicted_cv=%lld\n", t_predicted_cv);
+	*val = t_predicted_cc + t_predicted_cv;
+	return 0;
+}
+
+#define CENTI_ICORRECT_C0	105
+#define CENTI_ICORRECT_C1	20
+static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
+{
+	int rc, ibatt_avg, msoc, act_cap_uah;
+	s32 divisor;
+	s64 t_predicted;
+
+	rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
+	if (rc < 0) {
+		/* try to get instantaneous current */
+		rc = fg_get_battery_current(chip, &ibatt_avg);
+		if (rc < 0) {
+			pr_err("failed to get battery current, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/* clamp ibatt_avg to 150mA */
+	if (ibatt_avg < 150000)
+		ibatt_avg = 150000;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
+	if (rc < 0) {
+		pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+		return rc;
+	}
+	act_cap_uah *= MILLI_UNIT;
+
+	rc = fg_get_prop_capacity(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting capacity, rc=%d\n", rc);
+		return rc;
+	}
+
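+	/*
+	 * Remaining charge (msoc% of the actual capacity) converted to
+	 * seconds at the average current, corrected by an SOC-dependent
+	 * factor of (1.05 + 0.002 * msoc).
+	 */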
+	t_predicted = div_s64((s64)msoc * act_cap_uah, 100);
+	t_predicted *= HOURS_TO_SECONDS;
+	divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
+	divisor = div_s64((s64)divisor * ibatt_avg, 10000);
+	if (divisor > 0)
+		t_predicted = div_s64(t_predicted, divisor);
+
+	*val = t_predicted;
+	return 0;
+}
+
+static int fg_update_maint_soc(struct fg_chip *chip)
+{
+	int rc = 0, msoc;
+
+	mutex_lock(&chip->charge_full_lock);
+	if (chip->delta_soc <= 0)
+		goto out;
+
+	rc = fg_get_msoc(chip, &msoc);
+	if (rc < 0) {
+		pr_err("Error in getting msoc, rc=%d\n", rc);
+		goto out;
+	}
+
+	if (msoc > chip->maint_soc) {
+		/*
+		 * When the monotonic SOC goes above maintenance SOC, we should
+		 * stop showing the maintenance SOC.
+		 */
+		chip->delta_soc = 0;
+		chip->maint_soc = 0;
+	} else if (msoc <= chip->last_msoc) {
+		/* MSOC is decreasing. Decrease maintenance SOC as well */
+		chip->maint_soc -= 1;
+		if (!(msoc % 10)) {
+			/*
+			 * Reduce the maintenance SOC additionally by 1 whenever
+			 * it crosses a SOC multiple of 10.
+			 */
+			chip->maint_soc -= 1;
+			chip->delta_soc -= 1;
+		}
+	}
+
+	fg_dbg(chip, FG_IRQ, "msoc: %d last_msoc: %d maint_soc: %d delta_soc: %d\n",
+		msoc, chip->last_msoc, chip->maint_soc, chip->delta_soc);
+	chip->last_msoc = msoc;
+out:
+	mutex_unlock(&chip->charge_full_lock);
+	return rc;
+}
+
+static int fg_esr_validate(struct fg_chip *chip)
+{
+	int rc, esr_uohms;
+	u8 buf[2];
+
+	if (chip->dt.esr_clamp_mohms <= 0)
+		return 0;
+
+	rc = fg_get_sram_prop(chip, FG_SRAM_ESR, &esr_uohms);
+	if (rc < 0) {
+		pr_err("failed to get ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (esr_uohms >= chip->dt.esr_clamp_mohms * 1000) {
+		pr_debug("ESR %d is > ESR_clamp\n", esr_uohms);
+		return 0;
+	}
+
+	esr_uohms = chip->dt.esr_clamp_mohms * 1000;
+	fg_encode(chip->sp, FG_SRAM_ESR, esr_uohms, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR].addr_word,
+			chip->sp[FG_SRAM_ESR].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_dbg(chip, FG_STATUS, "ESR clamped to %duOhms\n", esr_uohms);
+	return 0;
+}
+
+/* PSY CALLBACKS STAY HERE */
+
+static int fg_psy_get_property(struct power_supply *psy,
+				       enum power_supply_property psp,
+				       union power_supply_propval *pval)
+{
+	struct fg_chip *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = fg_get_prop_capacity(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		if (chip->battery_missing)
+			pval->intval = 3700000;
+		else
+			rc = fg_get_battery_voltage(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = fg_get_battery_current(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = fg_get_battery_temp(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE:
+		rc = fg_get_battery_resistance(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+		rc = fg_get_sram_prop(chip, FG_SRAM_OCV, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+		pval->intval = chip->cl.nom_cap_uah;
+		break;
+	case POWER_SUPPLY_PROP_RESISTANCE_ID:
+		pval->intval = chip->batt_id_ohms;
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_TYPE:
+		pval->strval = fg_get_battery_type(chip);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+		pval->intval = chip->bp.float_volt_uv;
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT:
+		pval->intval = fg_get_cycle_count(chip);
+		break;
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+		pval->intval = chip->cyc_ctr.id;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+		rc = fg_get_cc_soc(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_NOW:
+		pval->intval = chip->cl.init_cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_FULL:
+		pval->intval = chip->cl.learned_cc_uah;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+		rc = fg_get_cc_soc_sw(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
+		rc = fg_get_time_to_full(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+		rc = fg_get_time_to_empty(chip, &pval->intval);
+		break;
+	case POWER_SUPPLY_PROP_SOC_REPORTING_READY:
+		pval->intval = chip->soc_reporting_ready;
+		break;
+	case POWER_SUPPLY_PROP_DEBUG_BATTERY:
+		pval->intval = is_debug_batt_id(chip);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		rc = fg_get_sram_prop(chip, FG_SRAM_VBATT_FULL, &pval->intval);
+		break;
+	default:
+		pr_err("unsupported property %d\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc < 0)
+		return -ENODATA;
+
+	return 0;
+}
+
+static int fg_psy_set_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  const union power_supply_propval *pval)
+{
+	struct fg_chip *chip = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+		if ((pval->intval > 0) && (pval->intval <= BUCKET_COUNT)) {
+			chip->cyc_ctr.id = pval->intval;
+		} else {
+			pr_err("rejecting invalid cycle_count_id = %d\n",
+				pval->intval);
+			return -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		rc = fg_set_constant_chg_voltage(chip, pval->intval);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static int fg_property_is_writeable(struct power_supply *psy,
+						enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void fg_external_power_changed(struct power_supply *psy)
+{
+	pr_debug("power supply changed\n");
+}
+
+static int fg_notifier_cb(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct power_supply *psy = data;
+	struct fg_chip *chip = container_of(nb, struct fg_chip, nb);
+
+	if (event != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if (work_pending(&chip->status_change_work))
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "battery") == 0)
+		|| (strcmp(psy->desc->name, "usb") == 0)) {
+		/*
+		 * We cannot vote for awake votable here as that takes
+		 * a mutex lock and this is executed in an atomic context.
+		 */
+		pm_stay_awake(chip->dev);
+		schedule_work(&chip->status_change_work);
+	}
+
+	return NOTIFY_OK;
+}
+
+static enum power_supply_property fg_psy_props[] = {
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_OCV,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_RESISTANCE_ID,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER,
+	POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_DEBUG_BATTERY,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+};
+
+static const struct power_supply_desc fg_psy_desc = {
+	.name = "bms",
+	.type = POWER_SUPPLY_TYPE_BMS,
+	.properties = fg_psy_props,
+	.num_properties = ARRAY_SIZE(fg_psy_props),
+	.get_property = fg_psy_get_property,
+	.set_property = fg_psy_set_property,
+	.external_power_changed = fg_external_power_changed,
+	.property_is_writeable = fg_property_is_writeable,
+};
+
+/* INIT FUNCTIONS STAY HERE */
+
+static int fg_hw_init(struct fg_chip *chip)
+{
+	int rc;
+	u8 buf[4], val;
+
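+	/*
+	 * Program the DT-derived parameters: cutoff/empty/float voltages,
+	 * termination currents, SOC/voltage thresholds, thermal coefficients,
+	 * JEITA setpoints, ESR timers and filters, and Rconn compensation.
+	 */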
+	fg_encode(chip->sp, FG_SRAM_CUTOFF_VOLT, chip->dt.cutoff_volt_mv, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CUTOFF_VOLT].addr_word,
+			chip->sp[FG_SRAM_CUTOFF_VOLT].addr_byte, buf,
+			chip->sp[FG_SRAM_CUTOFF_VOLT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing cutoff_volt, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_EMPTY_VOLT, chip->dt.empty_volt_mv, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_EMPTY_VOLT].addr_word,
+			chip->sp[FG_SRAM_EMPTY_VOLT].addr_byte, buf,
+			chip->sp[FG_SRAM_EMPTY_VOLT].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing empty_volt, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* This SRAM register is only present in v2.0 and above */
+	if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
+					chip->bp.float_volt_uv > 0) {
+		fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+			chip->bp.float_volt_uv / 1000, buf);
+		rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+			chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
+			chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing float_volt, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->bp.vbatt_full_mv > 0) {
+		rc = fg_set_constant_chg_voltage(chip,
+				chip->bp.vbatt_full_mv * 1000);
+		if (rc < 0)
+			return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
+		buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
+			chip->sp[FG_SRAM_CHG_TERM_CURR].addr_byte, buf,
+			chip->sp[FG_SRAM_CHG_TERM_CURR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing chg_term_curr, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_SYS_TERM_CURR, chip->dt.sys_term_curr_ma,
+		buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_SYS_TERM_CURR].addr_word,
+			chip->sp[FG_SRAM_SYS_TERM_CURR].addr_byte, buf,
+			chip->sp[FG_SRAM_SYS_TERM_CURR].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing sys_term_curr, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->dt.vbatt_low_thr_mv > 0) {
+		fg_encode(chip->sp, FG_SRAM_VBATT_LOW,
+			chip->dt.vbatt_low_thr_mv, buf);
+		rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_LOW].addr_word,
+				chip->sp[FG_SRAM_VBATT_LOW].addr_byte, buf,
+				chip->sp[FG_SRAM_VBATT_LOW].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing vbatt_low_thr, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.delta_soc_thr > 0 && chip->dt.delta_soc_thr < 100) {
+		fg_encode(chip->sp, FG_SRAM_DELTA_MSOC_THR,
+			chip->dt.delta_soc_thr, buf);
+		rc = fg_sram_write(chip,
+				chip->sp[FG_SRAM_DELTA_MSOC_THR].addr_word,
+				chip->sp[FG_SRAM_DELTA_MSOC_THR].addr_byte,
+				buf, chip->sp[FG_SRAM_DELTA_MSOC_THR].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing delta_msoc_thr, rc=%d\n", rc);
+			return rc;
+		}
+
+		fg_encode(chip->sp, FG_SRAM_DELTA_BSOC_THR,
+			chip->dt.delta_soc_thr, buf);
+		rc = fg_sram_write(chip,
+				chip->sp[FG_SRAM_DELTA_BSOC_THR].addr_word,
+				chip->sp[FG_SRAM_DELTA_BSOC_THR].addr_byte,
+				buf, chip->sp[FG_SRAM_DELTA_BSOC_THR].len,
+				FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing delta_bsoc_thr, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * Configure the battery thermal coefficients c1, c2 and c3 if they
+	 * are specified (i.e. the first coefficient is non-zero).
+	 */
+	if (chip->dt.batt_therm_coeffs[0] > 0) {
+		rc = fg_write(chip, BATT_INFO_THERM_C1(chip),
+			chip->dt.batt_therm_coeffs, BATT_THERM_NUM_COEFFS);
+		if (rc < 0) {
+			pr_err("Error in writing battery thermal coefficients, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.recharge_soc_thr > 0 && chip->dt.recharge_soc_thr < 100) {
+		rc = fg_set_recharge_soc(chip, chip->dt.recharge_soc_thr);
+		if (rc < 0) {
+			pr_err("Error in setting recharge_soc, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.recharge_volt_thr_mv > 0) {
+		rc = fg_set_recharge_voltage(chip,
+			chip->dt.recharge_volt_thr_mv);
+		if (rc < 0) {
+			pr_err("Error in setting recharge_voltage, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.rsense_sel >= SRC_SEL_BATFET &&
+			chip->dt.rsense_sel < SRC_SEL_RESERVED) {
+		rc = fg_masked_write(chip, BATT_INFO_IBATT_SENSING_CFG(chip),
+				SOURCE_SELECT_MASK, chip->dt.rsense_sel);
+		if (rc < 0) {
+			pr_err("Error in writing rsense_sel, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_COLD], &val);
+	rc = fg_write(chip, BATT_INFO_JEITA_TOO_COLD(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_cold, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_COOL], &val);
+	rc = fg_write(chip, BATT_INFO_JEITA_COLD(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_cool, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_WARM], &val);
+	rc = fg_write(chip, BATT_INFO_JEITA_HOT(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_warm, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_HOT], &val);
+	rc = fg_write(chip, BATT_INFO_JEITA_TOO_HOT(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing jeita_hot, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->dt.esr_timer_charging > 0) {
+		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_charging, true,
+				      FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in setting ESR timer, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->dt.esr_timer_awake > 0) {
+		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake, false,
+				      FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in setting ESR timer, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->cyc_ctr.en)
+		restore_cycle_counter(chip);
+
+	if (chip->dt.jeita_hyst_temp >= 0) {
+		val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
+		rc = fg_masked_write(chip, BATT_INFO_BATT_TEMP_CFG(chip),
+			JEITA_TEMP_HYST_MASK, val);
+		if (rc < 0) {
+			pr_err("Error in writing batt_temp_cfg, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	get_batt_temp_delta(chip->dt.batt_temp_delta, &val);
+	rc = fg_masked_write(chip, BATT_INFO_BATT_TMPR_INTR(chip),
+			CHANGE_THOLD_MASK, val);
+	if (rc < 0) {
+		pr_err("Error in writing batt_temp_delta, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->dt.rconn_mohms > 0) {
+		rc = fg_rconn_config(chip);
+		if (rc < 0) {
+			pr_err("Error in configuring Rconn, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
+		chip->dt.esr_tight_flt_upct, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_TIGHT_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR tight filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	fg_encode(chip->sp, FG_SRAM_ESR_BROAD_FILTER,
+		chip->dt.esr_broad_flt_upct, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_word,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_BROAD_FILTER].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing ESR broad filter, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int fg_memif_init(struct fg_chip *chip)
+{
+	return fg_ima_init(chip);
+}
+
+/* INTERRUPT HANDLERS STAY HERE */
+
+static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	u8 status;
+	int rc;
+
+	rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			MEM_IF_INT_RT_STS(chip), rc);
+		return IRQ_HANDLED;
+	}
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
+	if (status & MEM_XCP_BIT) {
+		rc = fg_clear_dma_errors_if_any(chip);
+		if (rc < 0) {
+			pr_err("Error in clearing DMA error, rc=%d\n", rc);
+			return IRQ_HANDLED;
+		}
+
+		mutex_lock(&chip->sram_rw_lock);
+		rc = fg_clear_ima_errors_if_any(chip, true);
+		if (rc < 0 && rc != -EAGAIN)
+			pr_err("Error in checking IMA errors rc:%d\n", rc);
+		mutex_unlock(&chip->sram_rw_lock);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	u8 status;
+	int rc;
+
+	rc = fg_read(chip, BATT_INFO_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			BATT_INFO_INT_RT_STS(chip), rc);
+		return IRQ_HANDLED;
+	}
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered sts:%d\n", irq, status);
+	chip->battery_missing = (status & BT_MISS_BIT);
+
+	if (chip->battery_missing) {
+		chip->profile_available = false;
+		chip->profile_loaded = false;
+		chip->soc_reporting_ready = false;
+		return IRQ_HANDLED;
+	}
+
+	rc = fg_get_batt_id(chip);
+	if (rc < 0) {
+		chip->soc_reporting_ready = true;
+		pr_err("Error in getting battery id, rc:%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = fg_get_batt_profile(chip);
+	if (rc < 0) {
+		chip->soc_reporting_ready = true;
+		pr_err("Error in getting battery profile, rc:%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	clear_battery_profile(chip);
+	schedule_delayed_work(&chip->profile_load_work, 0);
+
+	if (chip->fg_psy)
+		power_supply_changed(chip->fg_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	union power_supply_propval prop = {0, };
+	int rc, batt_temp;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	rc = fg_get_battery_temp(chip, &batt_temp);
+	if (rc < 0) {
+		pr_err("Error in getting batt_temp\n");
+		return IRQ_HANDLED;
+	}
+
+	rc = fg_esr_filter_config(chip, batt_temp);
+	if (rc < 0)
+		pr_err("Error in configuring ESR filter rc:%d\n", rc);
+
+	rc = fg_slope_limit_config(chip, batt_temp);
+	if (rc < 0)
+		pr_err("Error in configuring slope limiter rc:%d\n", rc);
+
+	if (!batt_psy_initialized(chip)) {
+		chip->last_batt_temp = batt_temp;
+		return IRQ_HANDLED;
+	}
+
+	power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+		&prop);
+	chip->health = prop.intval;
+
+	if (chip->last_batt_temp != batt_temp) {
+		chip->last_batt_temp = batt_temp;
+		power_supply_changed(chip->batt_psy);
+	}
+
+	if (abs(chip->last_batt_temp - batt_temp) > 30)
+		pr_warn("Battery temperature last:%d current: %d\n",
+			chip->last_batt_temp, batt_temp);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_first_est_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	complete_all(&chip->soc_ready);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_update_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	complete_all(&chip->soc_update);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_bsoc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	int rc;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	int rc;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	if (chip->cyc_ctr.en)
+		schedule_work(&chip->cycle_count_work);
+
+	if (chip->cl.active)
+		fg_cap_learning_update(chip);
+
+	rc = fg_charge_full_update(chip);
+	if (rc < 0)
+		pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+	rc = fg_adjust_ki_coeff_dischg(chip);
+	if (rc < 0)
+		pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+	rc = fg_update_maint_soc(chip);
+	if (rc < 0)
+		pr_err("Error in updating maint_soc, rc=%d\n", rc);
+
+	rc = fg_esr_validate(chip);
+	if (rc < 0)
+		pr_err("Error in validating ESR, rc=%d\n", rc);
+
+	if (batt_psy_initialized(chip))
+		power_supply_changed(chip->batt_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_empty_soc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	if (batt_psy_initialized(chip))
+		power_supply_changed(chip->batt_psy);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_dummy_irq_handler(int irq, void *data)
+{
+	pr_debug("irq %d triggered\n", irq);
+	return IRQ_HANDLED;
+}
+
+static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
+	/* BATT_SOC irqs */
+	[MSOC_FULL_IRQ] = {
+		.name		= "msoc-full",
+		.handler	= fg_soc_irq_handler,
+	},
+	[MSOC_HIGH_IRQ] = {
+		.name		= "msoc-high",
+		.handler	= fg_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_EMPTY_IRQ] = {
+		.name		= "msoc-empty",
+		.handler	= fg_empty_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_LOW_IRQ] = {
+		.name		= "msoc-low",
+		.handler	= fg_soc_irq_handler,
+		.wakeable	= true,
+	},
+	[MSOC_DELTA_IRQ] = {
+		.name		= "msoc-delta",
+		.handler	= fg_delta_msoc_irq_handler,
+		.wakeable	= true,
+	},
+	[BSOC_DELTA_IRQ] = {
+		.name		= "bsoc-delta",
+		.handler	= fg_delta_bsoc_irq_handler,
+		.wakeable	= true,
+	},
+	[SOC_READY_IRQ] = {
+		.name		= "soc-ready",
+		.handler	= fg_first_est_irq_handler,
+		.wakeable	= true,
+	},
+	[SOC_UPDATE_IRQ] = {
+		.name		= "soc-update",
+		.handler	= fg_soc_update_irq_handler,
+	},
+	/* BATT_INFO irqs */
+	[BATT_TEMP_DELTA_IRQ] = {
+		.name		= "batt-temp-delta",
+		.handler	= fg_delta_batt_temp_irq_handler,
+		.wakeable	= true,
+	},
+	[BATT_MISSING_IRQ] = {
+		.name		= "batt-missing",
+		.handler	= fg_batt_missing_irq_handler,
+		.wakeable	= true,
+	},
+	[ESR_DELTA_IRQ] = {
+		.name		= "esr-delta",
+		.handler	= fg_dummy_irq_handler,
+	},
+	[VBATT_LOW_IRQ] = {
+		.name		= "vbatt-low",
+		.handler	= fg_vbatt_low_irq_handler,
+		.wakeable	= true,
+	},
+	[VBATT_PRED_DELTA_IRQ] = {
+		.name		= "vbatt-pred-delta",
+		.handler	= fg_dummy_irq_handler,
+	},
+	/* MEM_IF irqs */
+	[DMA_GRANT_IRQ] = {
+		.name		= "dma-grant",
+		.handler	= fg_dummy_irq_handler,
+	},
+	[MEM_XCP_IRQ] = {
+		.name		= "mem-xcp",
+		.handler	= fg_mem_xcp_irq_handler,
+	},
+	[IMA_RDY_IRQ] = {
+		.name		= "ima-rdy",
+		.handler	= fg_dummy_irq_handler,
+	},
+};
+
+static int fg_get_irq_index_byname(const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fg_irqs); i++) {
+		if (strcmp(fg_irqs[i].name, name) == 0)
+			return i;
+	}
+
+	pr_err("%s is not in irq list\n", name);
+	return -ENOENT;
+}
+
+static int fg_register_interrupts(struct fg_chip *chip)
+{
+	struct device_node *child, *node = chip->dev->of_node;
+	struct property *prop;
+	const char *name;
+	int rc, irq, irq_index;
+
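+	/*
+	 * Walk every child peripheral node and request each interrupt listed
+	 * in its "interrupt-names" property, mapping the name to its handler
+	 * via the fg_irqs[] table above.
+	 */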
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names", prop,
+						name) {
+			irq = of_irq_get_byname(child, name);
+			if (irq < 0) {
+				dev_err(chip->dev, "failed to get irq %s irq:%d\n",
+					name, irq);
+				return irq;
+			}
+
+			irq_index = fg_get_irq_index_byname(name);
+			if (irq_index < 0)
+				return irq_index;
+
+			rc = devm_request_threaded_irq(chip->dev, irq, NULL,
+					fg_irqs[irq_index].handler,
+					IRQF_ONESHOT, name, chip);
+			if (rc < 0) {
+				dev_err(chip->dev, "failed to register irq handler for %s rc:%d\n",
+					name, rc);
+				return rc;
+			}
+
+			fg_irqs[irq_index].irq = irq;
+			if (fg_irqs[irq_index].wakeable)
+				enable_irq_wake(fg_irqs[irq_index].irq);
+		}
+	}
+
+	return 0;
+}
+
+static int fg_parse_slope_limit_coefficients(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc, i;
+
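+	/*
+	 * Slope limiting is optional; if the temperature threshold property
+	 * is absent, leave the feature disabled.
+	 */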
+	rc = of_property_read_u32(node, "qcom,slope-limit-temp-threshold",
+			&chip->dt.slope_limit_temp);
+	if (rc < 0)
+		return 0;
+
+	rc = of_property_count_elems_of_size(node, "qcom,slope-limit-coeffs",
+			sizeof(u32));
+	if (rc != SLOPE_LIMIT_NUM_COEFFS)
+		return -EINVAL;
+
+	rc = of_property_read_u32_array(node, "qcom,slope-limit-coeffs",
+			chip->dt.slope_limit_coeffs, SLOPE_LIMIT_NUM_COEFFS);
+	if (rc < 0) {
+		pr_err("Error in reading qcom,slope-limit-coeffs, rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 0; i < SLOPE_LIMIT_NUM_COEFFS; i++) {
+		if (chip->dt.slope_limit_coeffs[i] > SLOPE_LIMIT_COEFF_MAX ||
+			chip->dt.slope_limit_coeffs[i] < 0) {
+			pr_err("Incorrect slope limit coefficient\n");
+			return -EINVAL;
+		}
+	}
+
+	chip->slope_limit_en = true;
+	return 0;
+}
+
+static int fg_parse_ki_coefficients(struct fg_chip *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc, i;
+
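+	/*
+	 * All three ki-coeff arrays are optional; if any of them is missing,
+	 * leave the discharge ki-coefficient adjustment disabled.
+	 */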
+	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-soc-dischg",
+		sizeof(u32));
+	if (rc != KI_COEFF_SOC_LEVELS)
+		return 0;
+
+	rc = of_property_read_u32_array(node, "qcom,ki-coeff-soc-dischg",
+			chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
+	if (rc < 0) {
+		pr_err("Error in reading ki-coeff-soc-dischg, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-med-dischg",
+		sizeof(u32));
+	if (rc != KI_COEFF_SOC_LEVELS)
+		return 0;
+
+	rc = of_property_read_u32_array(node, "qcom,ki-coeff-med-dischg",
+			chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
+	if (rc < 0) {
+		pr_err("Error in reading ki-coeff-med-dischg, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-hi-dischg",
+		sizeof(u32));
+	if (rc != KI_COEFF_SOC_LEVELS)
+		return 0;
+
+	rc = of_property_read_u32_array(node, "qcom,ki-coeff-hi-dischg",
+			chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
+	if (rc < 0) {
+		pr_err("Error in reading ki-coeff-hi-dischg, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
+		if (chip->dt.ki_coeff_soc[i] < 0 ||
+			chip->dt.ki_coeff_soc[i] > FULL_CAPACITY) {
+			pr_err("Error in ki_coeff_soc_dischg values\n");
+			return -EINVAL;
+		}
+
+		if (chip->dt.ki_coeff_med_dischg[i] < 0 ||
+			chip->dt.ki_coeff_med_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_med_dischg values\n");
+			return -EINVAL;
+		}
+
+		if (chip->dt.ki_coeff_hi_dischg[i] < 0 ||
+			chip->dt.ki_coeff_hi_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_hi_dischg values\n");
+			return -EINVAL;
+		}
+	}
+	chip->ki_coeff_dischg_en = true;
+	return 0;
+}
+
+#define DEFAULT_CUTOFF_VOLT_MV		3200
+#define DEFAULT_EMPTY_VOLT_MV		2800
+#define DEFAULT_RECHARGE_VOLT_MV	4250
+#define DEFAULT_CHG_TERM_CURR_MA	100
+#define DEFAULT_SYS_TERM_CURR_MA	-125
+#define DEFAULT_DELTA_SOC_THR		1
+#define DEFAULT_RECHARGE_SOC_THR	95
+#define DEFAULT_BATT_TEMP_COLD		0
+#define DEFAULT_BATT_TEMP_COOL		5
+#define DEFAULT_BATT_TEMP_WARM		45
+#define DEFAULT_BATT_TEMP_HOT		50
+#define DEFAULT_CL_START_SOC		15
+#define DEFAULT_CL_MIN_TEMP_DECIDEGC	150
+#define DEFAULT_CL_MAX_TEMP_DECIDEGC	450
+#define DEFAULT_CL_MAX_INC_DECIPERC	5
+#define DEFAULT_CL_MAX_DEC_DECIPERC	100
+#define DEFAULT_CL_MIN_LIM_DECIPERC	0
+#define DEFAULT_CL_MAX_LIM_DECIPERC	0
+#define BTEMP_DELTA_LOW			2
+#define BTEMP_DELTA_HIGH		10
+#define DEFAULT_ESR_FLT_TEMP_DECIDEGC	100
+#define DEFAULT_ESR_TIGHT_FLT_UPCT	3907
+#define DEFAULT_ESR_BROAD_FLT_UPCT	99610
+#define DEFAULT_ESR_TIGHT_LT_FLT_UPCT	48829
+#define DEFAULT_ESR_BROAD_LT_FLT_UPCT	148438
+#define DEFAULT_ESR_CLAMP_MOHMS		20
+static int fg_parse_dt(struct fg_chip *chip)
+{
+	struct device_node *child, *revid_node, *node = chip->dev->of_node;
+	u32 base, temp;
+	u8 subtype;
+	int rc;
+
+	if (!node)  {
+		dev_err(chip->dev, "device tree node missing\n");
+		return -ENXIO;
+	}
+
+	revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!revid_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+
+	chip->pmic_rev_id = get_revid_data(revid_node);
+	if (IS_ERR_OR_NULL(chip->pmic_rev_id)) {
+		pr_err("Unable to get pmic_revid rc=%ld\n",
+			PTR_ERR(chip->pmic_rev_id));
+		/*
+		 * The revid peripheral must be registered; a failure here
+		 * only indicates that the rev-id module has not probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	pr_debug("PMIC subtype %d Digital major %d\n",
+		chip->pmic_rev_id->pmic_subtype, chip->pmic_rev_id->rev4);
+
+	switch (chip->pmic_rev_id->pmic_subtype) {
+	case PMI8998_SUBTYPE:
+		if (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4) {
+			chip->sp = pmi8998_v1_sram_params;
+			chip->alg_flags = pmi8998_v1_alg_flags;
+			chip->wa_flags |= PMI8998_V1_REV_WA;
+		} else if (chip->pmic_rev_id->rev4 == PMI8998_V2P0_REV4) {
+			chip->sp = pmi8998_v2_sram_params;
+			chip->alg_flags = pmi8998_v2_alg_flags;
+		} else {
+			return -EINVAL;
+		}
+		break;
+	case PM660_SUBTYPE:
+		chip->sp = pmi8998_v2_sram_params;
+		chip->alg_flags = pmi8998_v2_alg_flags;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (of_get_available_child_count(node) == 0) {
+		dev_err(chip->dev, "No child nodes specified!\n");
+		return -ENXIO;
+	}
+
+	for_each_available_child_of_node(node, child) {
+		rc = of_property_read_u32(child, "reg", &base);
+		if (rc < 0) {
+			dev_err(chip->dev, "reg not specified in node %s, rc=%d\n",
+				child->full_name, rc);
+			return rc;
+		}
+
+		rc = fg_read(chip, base + PERPH_SUBTYPE_REG, &subtype, 1);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't read subtype for base %d, rc=%d\n",
+				base, rc);
+			return rc;
+		}
+
+		switch (subtype) {
+		case FG_BATT_SOC_PMI8998:
+			chip->batt_soc_base = base;
+			break;
+		case FG_BATT_INFO_PMI8998:
+			chip->batt_info_base = base;
+			break;
+		case FG_MEM_INFO_PMI8998:
+			chip->mem_if_base = base;
+			break;
+		default:
+			dev_err(chip->dev, "Invalid peripheral subtype 0x%x\n",
+				subtype);
+			return -ENXIO;
+		}
+	}
+
+	rc = of_property_read_u32(node, "qcom,rradc-base", &base);
+	if (rc < 0) {
+		dev_err(chip->dev, "rradc-base not specified, rc=%d\n", rc);
+		return rc;
+	}
+	chip->rradc_base = base;
+
+	/* Read all the optional properties below */
+	rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
+	if (rc < 0)
+		chip->dt.cutoff_volt_mv = DEFAULT_CUTOFF_VOLT_MV;
+	else
+		chip->dt.cutoff_volt_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-empty-voltage", &temp);
+	if (rc < 0)
+		chip->dt.empty_volt_mv = DEFAULT_EMPTY_VOLT_MV;
+	else
+		chip->dt.empty_volt_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-vbatt-low-thr", &temp);
+	if (rc < 0)
+		chip->dt.vbatt_low_thr_mv = -EINVAL;
+	else
+		chip->dt.vbatt_low_thr_mv = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-chg-term-current", &temp);
+	if (rc < 0)
+		chip->dt.chg_term_curr_ma = DEFAULT_CHG_TERM_CURR_MA;
+	else
+		chip->dt.chg_term_curr_ma = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-sys-term-current", &temp);
+	if (rc < 0)
+		chip->dt.sys_term_curr_ma = DEFAULT_SYS_TERM_CURR_MA;
+	else
+		chip->dt.sys_term_curr_ma = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-delta-soc-thr", &temp);
+	if (rc < 0)
+		chip->dt.delta_soc_thr = DEFAULT_DELTA_SOC_THR;
+	else
+		chip->dt.delta_soc_thr = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-recharge-soc-thr", &temp);
+	if (rc < 0)
+		chip->dt.recharge_soc_thr = DEFAULT_RECHARGE_SOC_THR;
+	else
+		chip->dt.recharge_soc_thr = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-recharge-voltage", &temp);
+	if (rc < 0)
+		chip->dt.recharge_volt_thr_mv = DEFAULT_RECHARGE_VOLT_MV;
+	else
+		chip->dt.recharge_volt_thr_mv = temp;
+
+	chip->dt.auto_recharge_soc = of_property_read_bool(node,
+					"qcom,fg-auto-recharge-soc");
+
+	rc = of_property_read_u32(node, "qcom,fg-rsense-sel", &temp);
+	if (rc < 0)
+		chip->dt.rsense_sel = SRC_SEL_BATFET_SMB;
+	else
+		chip->dt.rsense_sel = (u8)temp & SOURCE_SELECT_MASK;
+
+	chip->dt.jeita_thresholds[JEITA_COLD] = DEFAULT_BATT_TEMP_COLD;
+	chip->dt.jeita_thresholds[JEITA_COOL] = DEFAULT_BATT_TEMP_COOL;
+	chip->dt.jeita_thresholds[JEITA_WARM] = DEFAULT_BATT_TEMP_WARM;
+	chip->dt.jeita_thresholds[JEITA_HOT] = DEFAULT_BATT_TEMP_HOT;
+	if (of_property_count_elems_of_size(node, "qcom,fg-jeita-thresholds",
+		sizeof(u32)) == NUM_JEITA_LEVELS) {
+		rc = of_property_read_u32_array(node,
+				"qcom,fg-jeita-thresholds",
+				chip->dt.jeita_thresholds, NUM_JEITA_LEVELS);
+		if (rc < 0)
+			pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
+				rc);
+	}
+
+	if (of_property_count_elems_of_size(node,
+		"qcom,battery-thermal-coefficients",
+		sizeof(u8)) == BATT_THERM_NUM_COEFFS) {
+		rc = of_property_read_u8_array(node,
+				"qcom,battery-thermal-coefficients",
+				chip->dt.batt_therm_coeffs,
+				BATT_THERM_NUM_COEFFS);
+		if (rc < 0)
+			pr_warn("Error reading battery thermal coefficients, rc:%d\n",
+				rc);
+	}
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-timer-charging", &temp);
+	if (rc < 0)
+		chip->dt.esr_timer_charging = -EINVAL;
+	else
+		chip->dt.esr_timer_charging = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-timer-awake", &temp);
+	if (rc < 0)
+		chip->dt.esr_timer_awake = -EINVAL;
+	else
+		chip->dt.esr_timer_awake = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-timer-asleep", &temp);
+	if (rc < 0)
+		chip->dt.esr_timer_asleep = -EINVAL;
+	else
+		chip->dt.esr_timer_asleep = temp;
+
+	chip->cyc_ctr.en = of_property_read_bool(node, "qcom,cycle-counter-en");
+	if (chip->cyc_ctr.en)
+		chip->cyc_ctr.id = 1;
+
+	chip->dt.force_load_profile = of_property_read_bool(node,
+					"qcom,fg-force-load-profile");
+
+	rc = of_property_read_u32(node, "qcom,cl-start-capacity", &temp);
+	if (rc < 0)
+		chip->dt.cl_start_soc = DEFAULT_CL_START_SOC;
+	else
+		chip->dt.cl_start_soc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
+	if (rc < 0)
+		chip->dt.cl_min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
+	else
+		chip->dt.cl_min_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
+	else
+		chip->dt.cl_max_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
+	else
+		chip->dt.cl_max_cap_inc = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
+	else
+		chip->dt.cl_max_cap_dec = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
+	if (rc < 0)
+		chip->dt.cl_min_cap_limit = DEFAULT_CL_MIN_LIM_DECIPERC;
+	else
+		chip->dt.cl_min_cap_limit = temp;
+
+	rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
+	if (rc < 0)
+		chip->dt.cl_max_cap_limit = DEFAULT_CL_MAX_LIM_DECIPERC;
+	else
+		chip->dt.cl_max_cap_limit = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-jeita-hyst-temp", &temp);
+	if (rc < 0)
+		chip->dt.jeita_hyst_temp = -EINVAL;
+	else
+		chip->dt.jeita_hyst_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-batt-temp-delta", &temp);
+	if (rc < 0)
+		chip->dt.batt_temp_delta = -EINVAL;
+	else if (temp > BTEMP_DELTA_LOW && temp <= BTEMP_DELTA_HIGH)
+		chip->dt.batt_temp_delta = temp;
+
+	chip->dt.hold_soc_while_full = of_property_read_bool(node,
+					"qcom,hold-soc-while-full");
+
+	rc = fg_parse_ki_coefficients(chip);
+	if (rc < 0)
+		pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
+
+	rc = of_property_read_u32(node, "qcom,fg-rconn-mohms", &temp);
+	if (rc < 0)
+		chip->dt.rconn_mohms = -EINVAL;
+	else
+		chip->dt.rconn_mohms = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-filter-switch-temp",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_flt_switch_temp = DEFAULT_ESR_FLT_TEMP_DECIDEGC;
+	else
+		chip->dt.esr_flt_switch_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-tight-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_tight_flt_upct = DEFAULT_ESR_TIGHT_FLT_UPCT;
+	else
+		chip->dt.esr_tight_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-broad-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_broad_flt_upct = DEFAULT_ESR_BROAD_FLT_UPCT;
+	else
+		chip->dt.esr_broad_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-tight-lt-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_tight_lt_flt_upct = DEFAULT_ESR_TIGHT_LT_FLT_UPCT;
+	else
+		chip->dt.esr_tight_lt_flt_upct = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-broad-lt-filter-micro-pct",
+			&temp);
+	if (rc < 0)
+		chip->dt.esr_broad_lt_flt_upct = DEFAULT_ESR_BROAD_LT_FLT_UPCT;
+	else
+		chip->dt.esr_broad_lt_flt_upct = temp;
+
+	rc = fg_parse_slope_limit_coefficients(chip);
+	if (rc < 0)
+		pr_err("Error in parsing slope limit coeffs, rc=%d\n", rc);
+
+	rc = of_property_read_u32(node, "qcom,fg-esr-clamp-mohms", &temp);
+	if (rc < 0)
+		chip->dt.esr_clamp_mohms = DEFAULT_ESR_CLAMP_MOHMS;
+	else
+		chip->dt.esr_clamp_mohms = temp;
+
+	return 0;
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+	power_supply_unreg_notifier(&chip->nb);
+	debugfs_remove_recursive(chip->dfs_root);
+	if (chip->awake_votable)
+		destroy_votable(chip->awake_votable);
+
+	if (chip->batt_id_chan)
+		iio_channel_release(chip->batt_id_chan);
+
+	dev_set_drvdata(chip->dev, NULL);
+}
+
+static int fg_gen3_probe(struct platform_device *pdev)
+{
+	struct fg_chip *chip;
+	struct power_supply_config fg_psy_cfg;
+	int rc, msoc, volt_uv, batt_temp;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->dev = &pdev->dev;
+	chip->debug_mask = &fg_gen3_debug_mask;
+	chip->irqs = fg_irqs;
+	chip->charge_status = -EINVAL;
+	chip->prev_charge_status = -EINVAL;
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		dev_err(chip->dev, "Parent regmap is unavailable\n");
+		return -ENXIO;
+	}
+
+	chip->batt_id_chan = iio_channel_get(chip->dev, "rradc_batt_id");
+	if (IS_ERR(chip->batt_id_chan)) {
+		if (PTR_ERR(chip->batt_id_chan) != -EPROBE_DEFER)
+			pr_err("batt_id_chan unavailable %ld\n",
+				PTR_ERR(chip->batt_id_chan));
+		rc = PTR_ERR(chip->batt_id_chan);
+		chip->batt_id_chan = NULL;
+		return rc;
+	}
+
+	chip->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb,
+					chip);
+	if (IS_ERR(chip->awake_votable)) {
+		rc = PTR_ERR(chip->awake_votable);
+		chip->awake_votable = NULL;
+		goto exit;
+	}
+
+	rc = fg_parse_dt(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in reading DT parameters, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	mutex_init(&chip->bus_lock);
+	mutex_init(&chip->sram_rw_lock);
+	mutex_init(&chip->cyc_ctr.lock);
+	mutex_init(&chip->cl.lock);
+	mutex_init(&chip->batt_avg_lock);
+	mutex_init(&chip->charge_full_lock);
+	init_completion(&chip->soc_update);
+	init_completion(&chip->soc_ready);
+	INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
+	INIT_WORK(&chip->status_change_work, status_change_work);
+	INIT_WORK(&chip->cycle_count_work, cycle_count_work);
+	INIT_DELAYED_WORK(&chip->batt_avg_work, batt_avg_work);
+	INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
+
+	rc = fg_get_batt_id(chip);
+	if (rc < 0) {
+		pr_err("Error in getting battery id, rc:%d\n", rc);
+		goto exit;
+	}
+
+	rc = fg_get_batt_profile(chip);
+	if (rc < 0) {
+		chip->soc_reporting_ready = true;
+		pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
+			chip->batt_id_ohms / 1000, rc);
+	}
+
+	rc = fg_memif_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in initializing FG_MEMIF, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	rc = fg_hw_init(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in initializing FG hardware, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	/* Register the power supply */
+	fg_psy_cfg.drv_data = chip;
+	fg_psy_cfg.of_node = NULL;
+	fg_psy_cfg.supplied_to = NULL;
+	fg_psy_cfg.num_supplicants = 0;
+	chip->fg_psy = devm_power_supply_register(chip->dev, &fg_psy_desc,
+			&fg_psy_cfg);
+	if (IS_ERR(chip->fg_psy)) {
+		pr_err("failed to register fg_psy rc = %ld\n",
+				PTR_ERR(chip->fg_psy));
+		rc = PTR_ERR(chip->fg_psy);
+		goto exit;
+	}
+
+	chip->nb.notifier_call = fg_notifier_cb;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto exit;
+	}
+
+	rc = fg_register_interrupts(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	/* Keep SOC_UPDATE irq disabled until we require it */
+	if (fg_irqs[SOC_UPDATE_IRQ].irq)
+		disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+
+	/* Keep BSOC_DELTA_IRQ irq disabled until we require it */
+	if (fg_irqs[BSOC_DELTA_IRQ].irq) {
+		disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+		disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
+		chip->bsoc_delta_irq_en = false;
+	}
+
+	rc = fg_debugfs_create(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	rc = fg_get_battery_voltage(chip, &volt_uv);
+	if (!rc)
+		rc = fg_get_prop_capacity(chip, &msoc);
+
+	if (!rc)
+		rc = fg_get_battery_temp(chip, &batt_temp);
+
+	if (!rc) {
+		pr_info("battery SOC:%d voltage: %duV temp: %d id: %dKOhms\n",
+			msoc, volt_uv, batt_temp, chip->batt_id_ohms / 1000);
+		rc = fg_esr_filter_config(chip, batt_temp);
+		if (rc < 0)
+			pr_err("Error in configuring ESR filter rc:%d\n", rc);
+	}
+
+	device_init_wakeup(chip->dev, true);
+	if (chip->profile_available)
+		schedule_delayed_work(&chip->profile_load_work, 0);
+
+	pr_debug("FG GEN3 driver probed successfully\n");
+	return 0;
+exit:
+	fg_cleanup(chip);
+	return rc;
+}
+
+static int fg_gen3_suspend(struct device *dev)
+{
+	struct fg_chip *chip = dev_get_drvdata(dev);
+	int rc;
+
+	if (chip->dt.esr_timer_awake > 0 && chip->dt.esr_timer_asleep > 0) {
+		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_asleep, false,
+				      FG_IMA_NO_WLOCK);
+		if (rc < 0) {
+			pr_err("Error in setting ESR timer during suspend, rc=%d\n",
+			       rc);
+			return rc;
+		}
+	}
+
+	cancel_delayed_work_sync(&chip->batt_avg_work);
+	if (fg_sram_dump)
+		cancel_delayed_work_sync(&chip->sram_dump_work);
+	return 0;
+}
+
+static int fg_gen3_resume(struct device *dev)
+{
+	struct fg_chip *chip = dev_get_drvdata(dev);
+	int rc;
+
+	if (chip->dt.esr_timer_awake > 0 && chip->dt.esr_timer_asleep > 0) {
+		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake, false,
+				      FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in setting ESR timer during resume, rc=%d\n",
+			       rc);
+			return rc;
+		}
+	}
+
+	fg_circ_buf_clr(&chip->ibatt_circ_buf);
+	fg_circ_buf_clr(&chip->vbatt_circ_buf);
+	schedule_delayed_work(&chip->batt_avg_work, 0);
+	if (fg_sram_dump)
+		schedule_delayed_work(&chip->sram_dump_work,
+				msecs_to_jiffies(fg_sram_dump_period_ms));
+	return 0;
+}
+
+static const struct dev_pm_ops fg_gen3_pm_ops = {
+	.suspend	= fg_gen3_suspend,
+	.resume		= fg_gen3_resume,
+};
+
+static int fg_gen3_remove(struct platform_device *pdev)
+{
+	struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	fg_cleanup(chip);
+	return 0;
+}
+
+static const struct of_device_id fg_gen3_match_table[] = {
+	{.compatible = FG_GEN3_DEV_NAME},
+	{},
+};
+
+static struct platform_driver fg_gen3_driver = {
+	.driver = {
+		.name = FG_GEN3_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = fg_gen3_match_table,
+		.pm		= &fg_gen3_pm_ops,
+	},
+	.probe		= fg_gen3_probe,
+	.remove		= fg_gen3_remove,
+};
+
+static int __init fg_gen3_init(void)
+{
+	return platform_driver_register(&fg_gen3_driver);
+}
+
+static void __exit fg_gen3_exit(void)
+{
+	platform_driver_unregister(&fg_gen3_driver);
+}
+
+module_init(fg_gen3_init);
+module_exit(fg_gen3_exit);
+
+MODULE_DESCRIPTION("QPNP Fuel gauge GEN3 driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" FG_GEN3_DEV_NAME);
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
new file mode 100644
index 0000000..cbfab30
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -0,0 +1,1456 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "pmic-voter.h"
+
+#define QNOVO_REVISION1		0x00
+#define QNOVO_REVISION2		0x01
+#define QNOVO_PERPH_TYPE	0x04
+#define QNOVO_PERPH_SUBTYPE	0x05
+#define QNOVO_PTTIME_STS	0x07
+#define QNOVO_PTRAIN_STS	0x08
+#define QNOVO_ERROR_STS		0x09
+#define QNOVO_ERROR_BIT		BIT(0)
+#define QNOVO_INT_RT_STS	0x10
+#define QNOVO_INT_SET_TYPE	0x11
+#define QNOVO_INT_POLARITY_HIGH	0x12
+#define QNOVO_INT_POLARITY_LOW	0x13
+#define QNOVO_INT_LATCHED_CLR	0x14
+#define QNOVO_INT_EN_SET	0x15
+#define QNOVO_INT_EN_CLR	0x16
+#define QNOVO_INT_LATCHED_STS	0x18
+#define QNOVO_INT_PENDING_STS	0x19
+#define QNOVO_INT_MID_SEL	0x1A
+#define QNOVO_INT_PRIORITY	0x1B
+#define QNOVO_PE_CTRL		0x40
+#define QNOVO_PREST1_CTRL	0x41
+#define QNOVO_PPULS1_LSB_CTRL	0x42
+#define QNOVO_PPULS1_MSB_CTRL	0x43
+#define QNOVO_NREST1_CTRL	0x44
+#define QNOVO_NPULS1_CTRL	0x45
+#define QNOVO_PPCNT_CTRL	0x46
+#define QNOVO_VLIM1_LSB_CTRL	0x47
+#define QNOVO_VLIM1_MSB_CTRL	0x48
+#define QNOVO_PTRAIN_EN		0x49
+#define QNOVO_PTRAIN_EN_BIT	BIT(0)
+#define QNOVO_PE_CTRL2		0x4A
+#define QNOVO_PREST2_LSB_CTRL	0x50
+#define QNOVO_PREST2_MSB_CTRL	0x51
+#define QNOVO_PPULS2_LSB_CTRL	0x52
+#define QNOVO_PPULS2_MSB_CTRL	0x53
+#define QNOVO_NREST2_CTRL	0x54
+#define QNOVO_NPULS2_CTRL	0x55
+#define QNOVO_VLIM2_LSB_CTRL	0x56
+#define QNOVO_VLIM2_MSB_CTRL	0x57
+#define QNOVO_PVOLT1_LSB	0x60
+#define QNOVO_PVOLT1_MSB	0x61
+#define QNOVO_PCUR1_LSB		0x62
+#define QNOVO_PCUR1_MSB		0x63
+#define QNOVO_PVOLT2_LSB	0x70
+#define QNOVO_PVOLT2_MSB	0x71
+#define QNOVO_RVOLT2_LSB	0x72
+#define QNOVO_RVOLT2_MSB	0x73
+#define QNOVO_PCUR2_LSB		0x74
+#define QNOVO_PCUR2_MSB		0x75
+#define QNOVO_SCNT		0x80
+#define QNOVO_VMAX_LSB		0x90
+#define QNOVO_VMAX_MSB		0x91
+#define QNOVO_SNUM		0x92
+
+/* Registers ending in 0 imply external rsense */
+#define QNOVO_IADC_OFFSET_0	0xA0
+#define QNOVO_IADC_OFFSET_1	0xA1
+#define QNOVO_IADC_GAIN_0	0xA2
+#define QNOVO_IADC_GAIN_1	0xA3
+#define QNOVO_VADC_OFFSET	0xA4
+#define QNOVO_VADC_GAIN		0xA5
+#define QNOVO_IADC_GAIN_2	0xA6
+#define QNOVO_SPARE		0xA7
+#define QNOVO_STRM_CTRL		0xA8
+#define QNOVO_IADC_OFFSET_OVR_VAL	0xA9
+#define QNOVO_IADC_OFFSET_OVR		0xAA
+#define QNOVO_DISABLE_CHARGING		0xAB
+
+#define QNOVO_TR_IADC_OFFSET_0	0xF1
+#define QNOVO_TR_IADC_OFFSET_1	0xF2
+
+#define DRV_MAJOR_VERSION	1
+#define DRV_MINOR_VERSION	0
+
+#define IADC_LSB_NA	2441400
+#define VADC_LSB_NA	1220700
+#define GAIN_LSB_FACTOR	976560
+
+#define USER_VOTER		"user_voter"
+#define OK_TO_QNOVO_VOTER	"ok_to_qnovo_voter"
+
+#define QNOVO_VOTER		"qnovo_voter"
+
+struct qnovo_dt_props {
+	bool			external_rsense;
+	struct device_node	*revid_dev_node;
+};
+
+enum {
+	QNOVO_NO_ERR_STS_BIT		= BIT(0),
+};
+
+struct chg_props {
+	bool		charging;
+	bool		usb_online;
+	bool		dc_online;
+};
+
+struct chg_status {
+	bool		ok_to_qnovo;
+};
+
+struct qnovo {
+	int			base;
+	struct mutex		write_lock;
+	struct regmap		*regmap;
+	struct qnovo_dt_props	dt;
+	struct device		*dev;
+	struct votable		*disable_votable;
+	struct class		qnovo_class;
+	struct pmic_revid_data	*pmic_rev_id;
+	u32			wa_flags;
+	s64			external_offset_nA;
+	s64			internal_offset_nA;
+	s64			offset_nV;
+	s64			external_i_gain_mega;
+	s64			internal_i_gain_mega;
+	s64			v_gain_mega;
+	struct notifier_block	nb;
+	struct power_supply	*batt_psy;
+	struct power_supply	*usb_psy;
+	struct power_supply	*dc_psy;
+	struct chg_props	cp;
+	struct chg_status	cs;
+	struct work_struct	status_change_work;
+	int			fv_uV_request;
+	int			fcc_uA_request;
+};
+
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, 0600);
+
+#define qnovo_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (debug_mask & (reason))				\
+			dev_info(chip->dev, fmt, ##__VA_ARGS__);	\
+		else							\
+			dev_dbg(chip->dev, fmt, ##__VA_ARGS__);		\
+	} while (0)
+
+static bool is_secure(struct qnovo *chip, int addr)
+{
+	/* assume everything at or above 0x40 is secure */
+	return (bool)(addr >= 0x40);
+}
+
+static int qnovo_read(struct qnovo *chip, u16 addr, u8 *buf, int len)
+{
+	return regmap_bulk_read(chip->regmap, chip->base + addr, buf, len);
+}
+
+static int qnovo_masked_write(struct qnovo *chip, u16 addr, u8 mask, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->write_lock);
+	if (is_secure(chip, addr)) {
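+		/*
+		 * Writing 0xA5 to the peripheral's SEC_ACCESS register
+		 * (offset 0xD0) unlocks the secure register write that
+		 * follows.
+		 */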
+		rc = regmap_write(chip->regmap,
+				((chip->base + addr) & ~(0xFF)) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_update_bits(chip->regmap, chip->base + addr, mask, val);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static int qnovo_write(struct qnovo *chip, u16 addr, u8 *buf, int len)
+{
+	int i, rc = 0;
+	bool is_start_secure, is_end_secure;
+
+	is_start_secure = is_secure(chip, addr);
+	is_end_secure = is_secure(chip, addr + len);
+
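+	/*
+	 * Fast path: no register in the range needs a secure unlock, so a
+	 * single bulk write is sufficient.
+	 */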
+	if (!is_start_secure && !is_end_secure) {
+		mutex_lock(&chip->write_lock);
+		rc = regmap_bulk_write(chip->regmap, chip->base + addr,
+					buf, len);
+		goto unlock;
+	}
+
+	mutex_lock(&chip->write_lock);
+	for (i = addr; i < addr + len; i++) {
+		if (is_secure(chip, i)) {
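+			/* unlock SEC_ACCESS before each secure write, as above */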
+			rc = regmap_write(chip->regmap,
+				((chip->base + i) & ~(0xFF)) | 0xD0, 0xA5);
+			if (rc < 0)
+				goto unlock;
+		}
+		rc = regmap_write(chip->regmap, chip->base + i, buf[i - addr]);
+		if (rc < 0)
+			goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
+static bool is_batt_available(struct qnovo *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
+static int qnovo_batt_psy_update(struct qnovo *chip, bool disable)
+{
+	union power_supply_propval pval = {0};
+	int rc = 0;
+
+	if (!is_batt_available(chip))
+		return -EINVAL;
+
+	if (chip->fv_uV_request != -EINVAL) {
+		pval.intval = disable ? -EINVAL : chip->fv_uV_request;
+		rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't set prop qnovo_fv rc = %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	if (chip->fcc_uA_request != -EINVAL) {
+		pval.intval = disable ? -EINVAL : chip->fcc_uA_request;
+		rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CURRENT_QNOVO,
+			&pval);
+		if (rc < 0) {
+			pr_err("Couldn't set prop qnovo_fcc rc = %d\n", rc);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
+					const char *client)
+{
+	struct qnovo *chip = data;
+	int rc = 0;
+
+	if (disable) {
+		rc = qnovo_batt_psy_update(chip, true);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+				 disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+			disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	if (!disable) {
+		rc = qnovo_batt_psy_update(chip, false);
+		if (rc < 0)
+			return rc;
+	}
+
+	return rc;
+}
+
+static int qnovo_parse_dt(struct qnovo *chip)
+{
+	struct device_node *node = chip->dev->of_node;
+	int rc;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "reg", &chip->base);
+	if (rc < 0) {
+		pr_err("Couldn't read base rc = %d\n", rc);
+		return rc;
+	}
+
+	chip->dt.external_rsense = of_property_read_bool(node,
+			"qcom,external-rsense");
+
+	chip->dt.revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!chip->dt.revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property - driver failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qnovo_check_chg_version(struct qnovo *chip)
+{
+	int rc;
+
+	chip->pmic_rev_id = get_revid_data(chip->dt.revid_dev_node);
+	if (IS_ERR(chip->pmic_rev_id)) {
+		rc = PTR_ERR(chip->pmic_rev_id);
+		if (rc != -EPROBE_DEFER)
+			pr_err("Unable to get pmic_revid rc=%d\n", rc);
+		return rc;
+	}
+
+	if ((chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE)
+		   && (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4)) {
+		chip->wa_flags |= QNOVO_NO_ERR_STS_BIT;
+	}
+
+	return 0;
+}
+
+enum {
+	VER = 0,
+	OK_TO_QNOVO,
+	ENABLE,
+	FV_REQUEST,
+	FCC_REQUEST,
+	PE_CTRL_REG,
+	PE_CTRL2_REG,
+	PTRAIN_STS_REG,
+	INT_RT_STS_REG,
+	PREST1,
+	PPULS1,
+	NREST1,
+	NPULS1,
+	PPCNT,
+	VLIM1,
+	PVOLT1,
+	PCUR1,
+	PTTIME,
+	PREST2,
+	PPULS2,
+	NREST2,
+	NPULS2,
+	VLIM2,
+	PVOLT2,
+	RVOLT2,
+	PCUR2,
+	SCNT,
+	VMAX,
+	SNUM,
+	VBATT,
+	IBATT,
+	BATTTEMP,
+	BATTSOC,
+};
+
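+/*
+ * A raw register value maps to the unit shown in sysfs as:
+ *	value = (regval * reg_to_unit_multiplier) / reg_to_unit_divider
+ *		- reg_to_unit_offset
+ */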
+struct param_info {
+	char	*name;
+	int	start_addr;
+	int	num_regs;
+	int	reg_to_unit_multiplier;
+	int	reg_to_unit_divider;
+	int	reg_to_unit_offset;
+	int	min_val;
+	int	max_val;
+	char	*units_str;
+};
+
+static struct param_info params[] = {
+	[FV_REQUEST] = {
+		.units_str		= "uV",
+	},
+	[FCC_REQUEST] = {
+		.units_str		= "uA",
+	},
+	[PE_CTRL_REG] = {
+		.name			= "CTRL_REG",
+		.start_addr		= QNOVO_PE_CTRL,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PE_CTRL2_REG] = {
+		.name			= "PE_CTRL2_REG",
+		.start_addr		= QNOVO_PE_CTRL2,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PTRAIN_STS_REG] = {
+		.name			= "PTRAIN_STS",
+		.start_addr		= QNOVO_PTRAIN_STS,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[INT_RT_STS_REG] = {
+		.name			= "INT_RT_STS",
+		.start_addr		= QNOVO_INT_RT_STS,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
+	[PREST1] = {
+		.name			= "PREST1",
+		.start_addr		= QNOVO_PREST1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "mS",
+	},
+	[PPULS1] = {
+		.name			= "PPULS1",
+		.start_addr		= QNOVO_PPULS1_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1600, /* converts to uC */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 104856000,
+		.units_str		= "uC",
+	},
+	[NREST1] = {
+		.name			= "NREST1",
+		.start_addr		= QNOVO_NREST1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "mS",
+	},
+	[NPULS1] = {
+		.name			= "NPULS1",
+		.start_addr		= QNOVO_NPULS1_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "mS",
+	},
+	[PPCNT] = {
+		.name			= "PPCNT",
+		.start_addr		= QNOVO_PPCNT_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 255,
+		.units_str		= "pulses",
+	},
+	[VLIM1] = {
+		.name			= "VLIM1",
+		.start_addr		= QNOVO_VLIM1_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 5000000,
+		.units_str		= "uV",
+	},
+	[PVOLT1] = {
+		.name			= "PVOLT1",
+		.start_addr		= QNOVO_PVOLT1_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[PCUR1] = {
+		.name			= "PCUR1",
+		.start_addr		= QNOVO_PCUR1_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1220700, /* converts to nA */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uA",
+	},
+	[PTTIME] = {
+		.name			= "PTTIME",
+		.start_addr		= QNOVO_PTTIME_STS,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 2,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "S",
+	},
+	[PREST2] = {
+		.name			= "PREST2",
+		.start_addr		= QNOVO_PREST2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 327675,
+		.units_str		= "mS",
+	},
+	[PPULS2] = {
+		.name			= "PPULS2",
+		.start_addr		= QNOVO_PPULS2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1600, /* converts to uC */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 104856000,
+		.units_str		= "uC",
+	},
+	[NREST2] = {
+		.name			= "NREST2",
+		.start_addr		= QNOVO_NREST2_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.reg_to_unit_offset	= -5,
+		.min_val		= 5,
+		.max_val		= 1280,
+		.units_str		= "mS",
+	},
+	[NPULS2] = {
+		.name			= "NPULS2",
+		.start_addr		= QNOVO_NPULS2_CTRL,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 5,
+		.reg_to_unit_divider	= 1,
+		.min_val		= 5,
+		.max_val		= 1275,
+		.units_str		= "mS",
+	},
+	[VLIM2] = {
+		.name			= "VLIM2",
+		.start_addr		= QNOVO_VLIM2_LSB_CTRL,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 5000000,
+		.units_str		= "uV",
+	},
+	[PVOLT2] = {
+		.name			= "PVOLT2",
+		.start_addr		= QNOVO_PVOLT2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[RVOLT2] = {
+		.name			= "RVOLT2",
+		.start_addr		= QNOVO_RVOLT2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 610350,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[PCUR2] = {
+		.name			= "PCUR2",
+		.start_addr		= QNOVO_PCUR2_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 1220700, /* converts to nA */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uA",
+	},
+	[SCNT] = {
+		.name			= "SCNT",
+		.start_addr		= QNOVO_SCNT,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "pulses",
+	},
+	[VMAX] = {
+		.name			= "VMAX",
+		.start_addr		= QNOVO_VMAX_LSB,
+		.num_regs		= 2,
+		.reg_to_unit_multiplier	= 814000, /* converts to nV */
+		.reg_to_unit_divider	= 1,
+		.units_str		= "uV",
+	},
+	[SNUM] = {
+		.name			= "SNUM",
+		.start_addr		= QNOVO_SNUM,
+		.num_regs		= 1,
+		.reg_to_unit_multiplier	= 1,
+		.reg_to_unit_divider	= 1,
+		.units_str		= "pulses",
+	},
+	[VBATT]	= {
+		.name			= "POWER_SUPPLY_PROP_VOLTAGE_NOW",
+		.start_addr		= POWER_SUPPLY_PROP_VOLTAGE_NOW,
+		.units_str		= "uV",
+	},
+	[IBATT]	= {
+		.name			= "POWER_SUPPLY_PROP_CURRENT_NOW",
+		.start_addr		= POWER_SUPPLY_PROP_CURRENT_NOW,
+		.units_str		= "uA",
+	},
+	[BATTTEMP] = {
+		.name			= "POWER_SUPPLY_PROP_TEMP",
+		.start_addr		= POWER_SUPPLY_PROP_TEMP,
+		.units_str		= "deciDegC",
+	},
+	[BATTSOC] = {
+		.name			= "POWER_SUPPLY_PROP_CAPACITY",
+		.start_addr		= POWER_SUPPLY_PROP_CAPACITY,
+		.units_str		= "%",
+	},
+};
+
+static struct class_attribute qnovo_attributes[];
+
+static ssize_t version_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+			DRV_MAJOR_VERSION, DRV_MINOR_VERSION);
+}
+
+static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->cs.ok_to_qnovo);
+}
+
+static ssize_t enable_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int val;
+
+	val = get_client_vote(chip->disable_votable, USER_VOTER);
+	val = !val;
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t enable_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	unsigned long val;
+	bool disable;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	disable = !val;
+
+	vote(chip->disable_votable, USER_VOTER, disable, 0);
+	return count;
+}
+
+static ssize_t val_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int i = attr - qnovo_attributes;
+	int val = 0;
+
+	if (i == FV_REQUEST)
+		val = chip->fv_uV_request;
+
+	if (i == FCC_REQUEST)
+		val = chip->fcc_uA_request;
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+}
+
+static ssize_t val_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int i = attr - qnovo_attributes;
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	if (i == FV_REQUEST)
+		chip->fv_uV_request = val;
+
+	if (i == FCC_REQUEST)
+		chip->fcc_uA_request = val;
+
+	return count;
+}
+
+static ssize_t reg_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	int rc;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval = buf[1] << 8 | buf[0];
+
+	return snprintf(ubuf, PAGE_SIZE, "0x%04x%s\n",
+			regval, params[i].units_str);
+}
+
+static ssize_t reg_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	unsigned long val;
+	int rc;
+
+	if (kstrtoul(ubuf, 16, &val))
+		return -EINVAL;
+
+	buf[0] = val & 0xFF;
+	buf[1] = (val >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	return count;
+}
+
+static ssize_t time_show(struct class *c, struct class_attribute *attr,
+		char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	int val;
+	int rc;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval = buf[1] << 8 | buf[0];
+
+	val = ((regval * params[i].reg_to_unit_multiplier)
+			/ params[i].reg_to_unit_divider)
+		- params[i].reg_to_unit_offset;
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+}
+
+static ssize_t time_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	unsigned long val;
+	int rc;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	if (val < params[i].min_val || val > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
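+	/* invert the register-to-unit mapping used by time_show() */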
+	regval = (((int)val + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider)
+		/ params[i].reg_to_unit_multiplier;
+	buf[0] = regval & 0xFF;
+	buf[1] = (regval >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t current_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uA;
+	s64 regval_nA;
+	s64 gain, offset_nA, comp_val_nA;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval_nA = buf[1] << 8 | buf[0];
+	regval_nA = div_s64(regval_nA * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
+	if (chip->dt.external_rsense) {
+		offset_nA = chip->external_offset_nA;
+		gain = chip->external_i_gain_mega;
+	} else {
+		offset_nA = chip->internal_offset_nA;
+		gain = chip->internal_i_gain_mega;
+	}
+
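+	/*
+	 * The gain is stored scaled by 10^6 ("mega"); scale it back out and
+	 * then apply the trim offset in nA.
+	 */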
+	comp_val_nA = div_s64(regval_nA * gain, 1000000) + offset_nA;
+	comp_val_uA = div_s64(comp_val_nA, 1000);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
+			comp_val_uA, params[i].units_str);
+}
+
+static ssize_t voltage_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uV;
+	s64 regval_nV;
+	s64 gain, offset_nV, comp_val_nV;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval_nV = buf[1] << 8 | buf[0];
+	regval_nV = div_s64(regval_nV * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
+	offset_nV = chip->offset_nV;
+	gain = chip->v_gain_mega;
+
+	comp_val_nV = div_s64(regval_nV * gain, 1000000) + offset_nV;
+	comp_val_uV = div_s64(comp_val_nV, 1000);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
+				comp_val_uV, params[i].units_str);
+}
+
+static ssize_t voltage_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	unsigned long val_uV;
+	s64 regval_nV;
+	s64 gain, offset_nV;
+
+	if (kstrtoul(ubuf, 10, &val_uV))
+		return -EINVAL;
+
+	if (val_uV < params[i].min_val || val_uV > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val_uV,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
+	offset_nV = chip->offset_nV;
+	gain = chip->v_gain_mega;
+
+	regval_nV = (s64)val_uV * 1000 - offset_nV;
+	regval_nV = div_s64(regval_nV * 1000000, gain);
+
+	regval_nV = div_s64((regval_nV + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider,
+			params[i].reg_to_unit_multiplier);
+	buf[0] = regval_nV & 0xFF;
+	buf[1] = ((u64)regval_nV >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t coulomb_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	int comp_val_uC;
+	s64 regval_uC, gain;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval_uC = buf[1] << 8 | buf[0];
+	regval_uC = div_s64(regval_uC * params[i].reg_to_unit_multiplier,
+					params[i].reg_to_unit_divider)
+			- params[i].reg_to_unit_offset;
+
+	if (chip->dt.external_rsense)
+		gain = chip->external_i_gain_mega;
+	else
+		gain = chip->internal_i_gain_mega;
+
+	comp_val_uC = div_s64(regval_uC * gain, 1000000);
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
+			comp_val_uC, params[i].units_str);
+}
+
+static ssize_t coulomb_store(struct class *c, struct class_attribute *attr,
+		       const char *ubuf, size_t count)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	int rc;
+	unsigned long val_uC;
+	s64 regval;
+	s64 gain;
+
+	if (kstrtoul(ubuf, 10, &val_uC))
+		return -EINVAL;
+
+	if (val_uC < params[i].min_val || val_uC > params[i].max_val) {
+		pr_err("Out of Range %d%s for %s\n", (int)val_uC,
+				params[i].units_str,
+				params[i].name);
+		return -ERANGE;
+	}
+
+	if (chip->dt.external_rsense)
+		gain = chip->external_i_gain_mega;
+	else
+		gain = chip->internal_i_gain_mega;
+
+	regval = div_s64((s64)val_uC * 1000000, gain);
+
+	regval = div_s64((regval + params[i].reg_to_unit_offset)
+			* params[i].reg_to_unit_divider,
+			params[i].reg_to_unit_multiplier);
+
+	buf[0] = regval & 0xFF;
+	buf[1] = ((u64)regval >> 8) & 0xFF;
+
+	rc = qnovo_write(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't write %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static ssize_t batt_prop_show(struct class *c, struct class_attribute *attr,
+				char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	int rc = -EINVAL;
+	int prop = params[i].start_addr;
+	union power_supply_propval pval = {0};
+
+	if (!is_batt_available(chip))
+		return -EINVAL;
+
+	rc = power_supply_get_property(chip->batt_psy, prop, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't read battery prop %s rc = %d\n",
+				params[i].name, rc);
+		return -EINVAL;
+	}
+
+	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
+			pval.intval, params[i].units_str);
+}
+
+static struct class_attribute qnovo_attributes[] = {
+	[VER]			= __ATTR_RO(version),
+	[OK_TO_QNOVO]		= __ATTR_RO(ok_to_qnovo),
+	[ENABLE]		= __ATTR(enable, 0644,
+					enable_show, enable_store),
+	[FV_REQUEST]		= __ATTR(fv_uV_request, 0644,
+					val_show, val_store),
+	[FCC_REQUEST]		= __ATTR(fcc_uA_request, 0644,
+					val_show, val_store),
+	[PE_CTRL_REG]		= __ATTR(PE_CTRL_REG, 0644,
+					reg_show, reg_store),
+	[PE_CTRL2_REG]		= __ATTR(PE_CTRL2_REG, 0644,
+					reg_show, reg_store),
+	[PTRAIN_STS_REG]	= __ATTR(PTRAIN_STS_REG, 0644,
+					reg_show, reg_store),
+	[INT_RT_STS_REG]	= __ATTR(INT_RT_STS_REG, 0644,
+					reg_show, reg_store),
+	[PREST1]		= __ATTR(PREST1_mS, 0644,
+					time_show, time_store),
+	[PPULS1]		= __ATTR(PPULS1_uC, 0644,
+					coulomb_show, coulomb_store),
+	[NREST1]		= __ATTR(NREST1_mS, 0644,
+					time_show, time_store),
+	[NPULS1]		= __ATTR(NPULS1_mS, 0644,
+					time_show, time_store),
+	[PPCNT]			= __ATTR(PPCNT, 0644,
+					time_show, time_store),
+	[VLIM1]			= __ATTR(VLIM1_uV, 0644,
+					voltage_show, voltage_store),
+	[PVOLT1]		= __ATTR(PVOLT1_uV, 0444,
+					voltage_show, NULL),
+	[PCUR1]			= __ATTR(PCUR1_uA, 0444,
+					current_show, NULL),
+	[PTTIME]		= __ATTR(PTTIME_S, 0444,
+					time_show, NULL),
+	[PREST2]		= __ATTR(PREST2_mS, 0644,
+					time_show, time_store),
+	[PPULS2]		= __ATTR(PPULS2_mS, 0644,
+					coulomb_show, coulomb_store),
+	[NREST2]		= __ATTR(NREST2_mS, 0644,
+					time_show, time_store),
+	[NPULS2]		= __ATTR(NPULS2_mS, 0644,
+					time_show, time_store),
+	[VLIM2]			= __ATTR(VLIM2_uV, 0644,
+					voltage_show, voltage_store),
+	[PVOLT2]		= __ATTR(PVOLT2_uV, 0444,
+					voltage_show, NULL),
+	[RVOLT2]		= __ATTR(RVOLT2_uV, 0444,
+					voltage_show, NULL),
+	[PCUR2]			= __ATTR(PCUR2_uA, 0444,
+					current_show, NULL),
+	[SCNT]			= __ATTR(SCNT, 0644,
+					time_show, time_store),
+	[VMAX]			= __ATTR(VMAX_uV, 0444,
+					voltage_show, NULL),
+	[SNUM]			= __ATTR(SNUM, 0644,
+					time_show, time_store),
+	[VBATT]			= __ATTR(VBATT_uV, 0444,
+					batt_prop_show, NULL),
+	[IBATT]			= __ATTR(IBATT_uA, 0444,
+					batt_prop_show, NULL),
+	[BATTTEMP]		= __ATTR(BATTTEMP_deciDegC, 0444,
+					batt_prop_show, NULL),
+	[BATTSOC]		= __ATTR(BATTSOC, 0444,
+					batt_prop_show, NULL),
+	__ATTR_NULL,
+};
+
+static void get_chg_props(struct qnovo *chip, struct chg_props *cp)
+{
+	union power_supply_propval pval;
+	u8 val = 0;
+	int rc;
+
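+	/* treat the device as charging unless the Qnovo error bit is set */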
+	cp->charging = true;
+	rc = qnovo_read(chip, QNOVO_ERROR_STS, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read error sts rc = %d\n", rc);
+		cp->charging = false;
+	} else {
+		cp->charging = (!(val & QNOVO_ERROR_BIT));
+	}
+
+	if (chip->wa_flags & QNOVO_NO_ERR_STS_BIT) {
+		/*
+		 * On v1.0 and v1.1 PMICs, force charging to true. If
+		 * conditions are not good to charge, s/w gets a PTRAIN_DONE
+		 * interrupt instead.
+		 */
+		cp->charging = true;
+	}
+
+	cp->usb_online = false;
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+	if (chip->usb_psy) {
+		rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_ONLINE, &pval);
+		if (rc < 0)
+			pr_err("Couldn't read usb online rc = %d\n", rc);
+		else
+			cp->usb_online = (bool)pval.intval;
+	}
+
+	cp->dc_online = false;
+	if (!chip->dc_psy)
+		chip->dc_psy = power_supply_get_by_name("dc");
+	if (chip->dc_psy) {
+		rc = power_supply_get_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_ONLINE, &pval);
+		if (rc < 0)
+			pr_err("Couldn't read dc online rc = %d\n", rc);
+		else
+			cp->dc_online = (bool)pval.intval;
+	}
+}
+
+static void get_chg_status(struct qnovo *chip, const struct chg_props *cp,
+				struct chg_status *cs)
+{
+	cs->ok_to_qnovo = false;
+
+	if (cp->charging &&
+		(cp->usb_online || cp->dc_online))
+		cs->ok_to_qnovo = true;
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct qnovo *chip = container_of(work,
+			struct qnovo, status_change_work);
+	bool notify_uevent = false;
+	struct chg_props cp;
+	struct chg_status cs;
+
+	get_chg_props(chip, &cp);
+	get_chg_status(chip, &cp, &cs);
+
+	if (cs.ok_to_qnovo ^ chip->cs.ok_to_qnovo) {
+		/*
+		 * when it is not okay to Qnovo charge, disable both voters,
+		 * so that when it becomes okay to Qnovo charge the user voter
+		 * has to specifically enable its vote to begin Qnovo charging
+		 */
+		if (!cs.ok_to_qnovo) {
+			vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 1, 0);
+			vote(chip->disable_votable, USER_VOTER, 1, 0);
+		} else {
+			vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 0, 0);
+		}
+		notify_uevent = true;
+	}
+
+	memcpy(&chip->cp, &cp, sizeof(struct chg_props));
+	memcpy(&chip->cs, &cs, sizeof(struct chg_status));
+
+	if (notify_uevent)
+		kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+}
+
+static int qnovo_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct qnovo *chip = container_of(nb, struct qnovo, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+	if ((strcmp(psy->desc->name, "battery") == 0)
+		|| (strcmp(psy->desc->name, "usb") == 0))
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+static irqreturn_t handle_ptrain_done(int irq, void *data)
+{
+	struct qnovo *chip = data;
+
+	/* disable user voter here */
+	vote(chip->disable_votable, USER_VOTER, 0, 0);
+	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+	return IRQ_HANDLED;
+}
+
+static int qnovo_hw_init(struct qnovo *chip)
+{
+	int rc;
+	u8 iadc_offset_external, iadc_offset_internal;
+	u8 iadc_gain_external, iadc_gain_internal;
+	u8 vadc_offset, vadc_gain;
+	u8 val;
+
+	vote(chip->disable_votable, USER_VOTER, 1, 0);
+
+	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_0, &iadc_offset_external, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc exernal offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_1, &iadc_offset_internal, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc internal offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_GAIN_0, &iadc_gain_external, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc external gain rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_IADC_GAIN_1, &iadc_gain_internal, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc internal gain rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_VADC_OFFSET, &vadc_offset, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read vadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_VADC_GAIN, &vadc_gain, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read vadc external gain rc = %d\n", rc);
+		return rc;
+	}
+
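+	/*
+	 * Convert the raw ADC trim values into nA/nV offsets and 10^6-scaled
+	 * ("mega") gains used by the sysfs show/store conversions.
+	 */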
+	chip->external_offset_nA = (s64)iadc_offset_external * IADC_LSB_NA;
+	chip->internal_offset_nA = (s64)iadc_offset_internal * IADC_LSB_NA;
+	chip->offset_nV = (s64)vadc_offset * VADC_LSB_NA;
+	chip->external_i_gain_mega
+		= 1000000000 + (s64)iadc_gain_external * GAIN_LSB_FACTOR;
+	chip->external_i_gain_mega
+		= div_s64(chip->external_i_gain_mega, 1000);
+	chip->internal_i_gain_mega
+		= 1000000000 + (s64)iadc_gain_internal * GAIN_LSB_FACTOR;
+	chip->internal_i_gain_mega
+		= div_s64(chip->internal_i_gain_mega, 1000);
+	chip->v_gain_mega = 1000000000 + (s64)vadc_gain * GAIN_LSB_FACTOR;
+	chip->v_gain_mega = div_s64(chip->v_gain_mega, 1000);
+
+	val = 0;
+	rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc bitsteam control rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
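+	/* write back the negated offset trim (two's complement of the u8) */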
+	val *= -1;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't read iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	val *= -1;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qnovo_register_notifier(struct qnovo *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = qnovo_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qnovo_determine_initial_status(struct qnovo *chip)
+{
+	status_change_work(&chip->status_change_work);
+	return 0;
+}
+
+static int qnovo_request_interrupts(struct qnovo *chip)
+{
+	int rc = 0;
+	int irq_ptrain_done = of_irq_get_byname(chip->dev->of_node,
+						"ptrain-done");
+
+	rc = devm_request_threaded_irq(chip->dev, irq_ptrain_done, NULL,
+					handle_ptrain_done,
+					IRQF_ONESHOT, "ptrain-done", chip);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d rc = %d\n",
+					irq_ptrain_done, rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int qnovo_probe(struct platform_device *pdev)
+{
+	struct qnovo *chip;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->fv_uV_request = -EINVAL;
+	chip->fcc_uA_request = -EINVAL;
+	chip->dev = &pdev->dev;
+	mutex_init(&chip->write_lock);
+
+	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+	if (!chip->regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	rc = qnovo_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = qnovo_check_chg_version(chip);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't check version rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set driver data before resources request it */
+	platform_set_drvdata(pdev, chip);
+
+	chip->disable_votable = create_votable("QNOVO_DISABLE", VOTE_SET_ANY,
+					qnovo_disable_cb, chip);
+	if (IS_ERR(chip->disable_votable)) {
+		rc = PTR_ERR(chip->disable_votable);
+		goto cleanup;
+	}
+
+	INIT_WORK(&chip->status_change_work, status_change_work);
+
+	rc = qnovo_hw_init(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto destroy_votable;
+	}
+
+	rc = qnovo_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto destroy_votable;
+	}
+
+	rc = qnovo_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	rc = qnovo_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+	chip->qnovo_class.name = "qnovo";
+	chip->qnovo_class.owner = THIS_MODULE;
+	chip->qnovo_class.class_attrs = qnovo_attributes;
+
+	rc = class_register(&chip->qnovo_class);
+	if (rc < 0) {
+		pr_err("couldn't register qnovo sysfs class rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	return rc;
+
+unreg_notifier:
+	power_supply_unreg_notifier(&chip->nb);
+destroy_votable:
+	destroy_votable(chip->disable_votable);
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int qnovo_remove(struct platform_device *pdev)
+{
+	struct qnovo *chip = platform_get_drvdata(pdev);
+
+	class_unregister(&chip->qnovo_class);
+	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->disable_votable);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,qpnp-qnovo", },
+	{ },
+};
+
+static struct platform_driver qnovo_driver = {
+	.driver		= {
+		.name		= "qcom,qnovo-driver",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe		= qnovo_probe,
+	.remove		= qnovo_remove,
+};
+module_platform_driver(qnovo_driver);
+
+MODULE_DESCRIPTION("QPNP Qnovo Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
new file mode 100644
index 0000000..dab7888
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -0,0 +1,2240 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include "smb-reg.h"
+#include "smb-lib.h"
+#include "storm-watch.h"
+#include "pmic-voter.h"
+
+#define SMB2_DEFAULT_WPWR_UW	8000000
+
+static struct smb_params v1_params = {
+	.fcc			= {
+		.name	= "fast charge current",
+		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 4500000,
+		.step_u	= 25000,
+	},
+	.fv			= {
+		.name	= "float voltage",
+		.reg	= FLOAT_VOLTAGE_CFG_REG,
+		.min_u	= 3487500,
+		.max_u	= 4920000,
+		.step_u	= 7500,
+	},
+	.usb_icl		= {
+		.name	= "usb input current limit",
+		.reg	= USBIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 4800000,
+		.step_u	= 25000,
+	},
+	.icl_stat		= {
+		.name	= "input current limit status",
+		.reg	= ICL_STATUS_REG,
+		.min_u	= 0,
+		.max_u	= 4800000,
+		.step_u	= 25000,
+	},
+	.otg_cl			= {
+		.name	= "usb otg current limit",
+		.reg	= OTG_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 250000,
+		.max_u	= 2000000,
+		.step_u	= 250000,
+	},
+	.dc_icl			= {
+		.name	= "dc input current limit",
+		.reg	= DCIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_pt_lv		= {
+		.name	= "dc icl PT <8V",
+		.reg	= ZIN_ICL_PT_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_pt_hv		= {
+		.name	= "dc icl PT >8V",
+		.reg	= ZIN_ICL_PT_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_lv		= {
+		.name	= "dc icl div2 <5.5V",
+		.reg	= ZIN_ICL_LV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_mid_lv	= {
+		.name	= "dc icl div2 5.5-6.5V",
+		.reg	= ZIN_ICL_MID_LV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_mid_hv	= {
+		.name	= "dc icl div2 6.5-8.0V",
+		.reg	= ZIN_ICL_MID_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.dc_icl_div2_hv		= {
+		.name	= "dc icl div2 >8.0V",
+		.reg	= ZIN_ICL_HV_REG,
+		.min_u	= 0,
+		.max_u	= 3000000,
+		.step_u	= 25000,
+	},
+	.jeita_cc_comp		= {
+		.name	= "jeita fcc reduction",
+		.reg	= JEITA_CCCOMP_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 1575000,
+		.step_u	= 25000,
+	},
+	.step_soc_threshold[0]		= {
+		.name	= "step charge soc threshold 1",
+		.reg	= STEP_CHG_SOC_OR_BATT_V_TH1_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+	},
+	.step_soc_threshold[1]		= {
+		.name	= "step charge soc threshold 2",
+		.reg	= STEP_CHG_SOC_OR_BATT_V_TH2_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+	},
+	.step_soc_threshold[2]         = {
+		.name	= "step charge soc threshold 3",
+		.reg	= STEP_CHG_SOC_OR_BATT_V_TH3_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+	},
+	.step_soc_threshold[3]         = {
+		.name	= "step charge soc threshold 4",
+		.reg	= STEP_CHG_SOC_OR_BATT_V_TH4_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+	},
+	.step_soc			= {
+		.name	= "step charge soc",
+		.reg	= STEP_CHG_SOC_VBATT_V_REG,
+		.min_u	= 0,
+		.max_u	= 100,
+		.step_u	= 1,
+		.set_proc	= smblib_mapping_soc_from_field_value,
+	},
+	.step_cc_delta[0]	= {
+		.name	= "step charge current delta 1",
+		.reg	= STEP_CHG_CURRENT_DELTA1_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.step_cc_delta[1]	= {
+		.name	= "step charge current delta 2",
+		.reg	= STEP_CHG_CURRENT_DELTA2_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.step_cc_delta[2]	= {
+		.name	= "step charge current delta 3",
+		.reg	= STEP_CHG_CURRENT_DELTA3_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.step_cc_delta[3]	= {
+		.name	= "step charge current delta 4",
+		.reg	= STEP_CHG_CURRENT_DELTA4_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.step_cc_delta[4]	= {
+		.name	= "step charge current delta 5",
+		.reg	= STEP_CHG_CURRENT_DELTA5_REG,
+		.min_u	= 100000,
+		.max_u	= 3200000,
+		.step_u	= 100000,
+		.get_proc	= smblib_mapping_cc_delta_to_field_value,
+		.set_proc	= smblib_mapping_cc_delta_from_field_value,
+	},
+	.freq_buck		= {
+		.name	= "buck switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
+		.min_u	= 600,
+		.max_u	= 2000,
+		.step_u	= 200,
+	},
+	.freq_boost		= {
+		.name	= "boost switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG,
+		.min_u	= 600,
+		.max_u	= 2000,
+		.step_u	= 200,
+	},
+};
+
+static struct smb_params pm660_params = {
+	.freq_buck		= {
+		.name	= "buck switching frequency",
+		.reg	= FREQ_CLK_DIV_REG,
+		.min_u	= 600,
+		.max_u	= 1600,
+		.set_proc = smblib_set_chg_freq,
+	},
+	.freq_boost		= {
+		.name	= "boost switching frequency",
+		.reg	= FREQ_CLK_DIV_REG,
+		.min_u	= 600,
+		.max_u	= 1600,
+		.set_proc = smblib_set_chg_freq,
+	},
+};
+
+#define STEP_CHARGING_MAX_STEPS	5
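+/* N charging steps are delimited by N - 1 SoC thresholds */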
+struct smb_dt_props {
+	int	fcc_ua;
+	int	usb_icl_ua;
+	int	otg_cl_ua;
+	int	dc_icl_ua;
+	int	boost_threshold_ua;
+	int	fv_uv;
+	int	wipower_max_uw;
+	u32	step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
+	s32	step_cc_delta[STEP_CHARGING_MAX_STEPS];
+	struct	device_node *revid_dev_node;
+	int	float_option;
+	int	chg_inhibit_thr_mv;
+	bool	no_battery;
+	bool	hvdcp_disable;
+	bool	auto_recharge_soc;
+};
+
+struct smb2 {
+	struct smb_charger	chg;
+	struct dentry		*dfs_root;
+	struct smb_dt_props	dt;
+	bool			bad_part;
+};
+
+static int __debug_mask;
+module_param_named(
+	debug_mask, __debug_mask, int, 0600
+);
+
+#define MICRO_1P5A	1500000
+#define MICRO_P1A	100000
+static int smb2_parse_dt(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	int rc, byte_len;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	chg->step_chg_enabled = true;
+
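+	/*
+	 * Step charging stays enabled only if both DT arrays below are
+	 * present with the expected element counts and read successfully.
+	 */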
+	if (of_property_count_u32_elems(node, "qcom,step-soc-thresholds")
+			!= STEP_CHARGING_MAX_STEPS - 1)
+		chg->step_chg_enabled = false;
+
+	rc = of_property_read_u32_array(node, "qcom,step-soc-thresholds",
+			chip->dt.step_soc_threshold,
+			STEP_CHARGING_MAX_STEPS - 1);
+	if (rc < 0)
+		chg->step_chg_enabled = false;
+
+	if (of_property_count_u32_elems(node, "qcom,step-current-deltas")
+			!= STEP_CHARGING_MAX_STEPS)
+		chg->step_chg_enabled = false;
+
+	rc = of_property_read_u32_array(node, "qcom,step-current-deltas",
+			chip->dt.step_cc_delta,
+			STEP_CHARGING_MAX_STEPS);
+	if (rc < 0)
+		chg->step_chg_enabled = false;
+
+	chip->dt.no_battery = of_property_read_bool(node,
+						"qcom,batteryless-platform");
+
+	chg->external_vconn = of_property_read_bool(node,
+						"qcom,external-vconn");
+
+	rc = of_property_read_u32(node,
+				"qcom,fcc-max-ua", &chip->dt.fcc_ua);
+	if (rc < 0)
+		chip->dt.fcc_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,fv-max-uv", &chip->dt.fv_uv);
+	if (rc < 0)
+		chip->dt.fv_uv = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
+	if (rc < 0)
+		chip->dt.usb_icl_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+	if (rc < 0)
+		chip->dt.otg_cl_ua = MICRO_1P5A;
+
+	rc = of_property_read_u32(node,
+				"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
+	if (rc < 0)
+		chip->dt.dc_icl_ua = -EINVAL;
+
+	rc = of_property_read_u32(node,
+				"qcom,boost-threshold-ua",
+				&chip->dt.boost_threshold_ua);
+	if (rc < 0)
+		chip->dt.boost_threshold_ua = MICRO_P1A;
+
+	rc = of_property_read_u32(node, "qcom,wipower-max-uw",
+				&chip->dt.wipower_max_uw);
+	if (rc < 0)
+		chip->dt.wipower_max_uw = -EINVAL;
+
+	if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
+		chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
+			GFP_KERNEL);
+
+		if (chg->thermal_mitigation == NULL)
+			return -ENOMEM;
+
+		chg->thermal_levels = byte_len / sizeof(u32);
+		rc = of_property_read_u32_array(node,
+				"qcom,thermal-mitigation",
+				chg->thermal_mitigation,
+				chg->thermal_levels);
+		if (rc < 0) {
+			dev_err(chg->dev,
+				"Couldn't read threm limits rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	of_property_read_u32(node, "qcom,float-option", &chip->dt.float_option);
+	if (chip->dt.float_option < 0 || chip->dt.float_option > 4) {
+		pr_err("qcom,float-option is out of range [0, 4]\n");
+		return -EINVAL;
+	}
+
+	chip->dt.hvdcp_disable = of_property_read_bool(node,
+						"qcom,hvdcp-disable");
+
+	of_property_read_u32(node, "qcom,chg-inhibit-threshold-mv",
+				&chip->dt.chg_inhibit_thr_mv);
+	if ((chip->dt.chg_inhibit_thr_mv < 0 ||
+		chip->dt.chg_inhibit_thr_mv > 300)) {
+		pr_err("qcom,chg-inhibit-threshold-mv is incorrect\n");
+		return -EINVAL;
+	}
+
+	chip->dt.auto_recharge_soc = of_property_read_bool(node,
+						"qcom,auto-recharge-soc");
+
+	chg->micro_usb_mode = of_property_read_bool(node, "qcom,micro-usb");
+
+	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+
+	chg->suspend_input_on_debug_batt = of_property_read_bool(node,
+					"qcom,suspend-input-on-debug-batt");
+	return 0;
+}
+
+/************************
+ * USB PSY REGISTRATION *
+ ************************/
+
+static enum power_supply_property smb2_usb_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_TYPEC_MODE,
+	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
+	POWER_SUPPLY_PROP_PD_ALLOWED,
+	POWER_SUPPLY_PROP_PD_ACTIVE,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_BOOST_CURRENT,
+	POWER_SUPPLY_PROP_PE_START,
+	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+};
+
+static int smb2_usb_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		if (chip->bad_part)
+			val->intval = 1;
+		else
+			rc = smblib_get_prop_usb_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_usb_online(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		val->intval = chg->voltage_min_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = chg->voltage_max_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_usb_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+		rc = smblib_get_prop_pd_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_usb_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		if (chip->bad_part)
+			val->intval = POWER_SUPPLY_TYPE_USB;
+		else
+			val->intval = chg->usb_psy_desc.type;
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_MODE:
+		if (chg->micro_usb_mode)
+			val->intval = POWER_SUPPLY_TYPEC_NONE;
+		else if (chip->bad_part)
+			val->intval = POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
+		else
+			rc = smblib_get_prop_typec_mode(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		if (chg->micro_usb_mode)
+			val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		else
+			rc = smblib_get_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
+		if (chg->micro_usb_mode)
+			val->intval = 0;
+		else
+			rc = smblib_get_prop_typec_cc_orientation(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ALLOWED:
+		rc = smblib_get_prop_pd_allowed(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ACTIVE:
+		val->intval = chg->pd_active;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+		rc = smblib_get_prop_usb_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_BOOST_CURRENT:
+		val->intval = chg->boost_current_ua;
+		break;
+	case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+		rc = smblib_get_prop_pd_in_hard_reset(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+		val->intval = chg->system_suspend_supported;
+		break;
+	case POWER_SUPPLY_PROP_PE_START:
+		rc = smblib_get_pe_start(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		val->intval = get_client_vote(chg->usb_icl_votable, CTM_VOTER);
+		break;
+	default:
+		pr_err("get prop %d is not supported in usb\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_usb_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		rc = smblib_set_prop_usb_voltage_min(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_prop_usb_voltage_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+		rc = smblib_set_prop_pd_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_prop_usb_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_set_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_ACTIVE:
+		rc = smblib_set_prop_pd_active(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+		rc = smblib_set_prop_pd_in_hard_reset(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+		chg->system_suspend_supported = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_BOOST_CURRENT:
+		rc = smblib_set_prop_boost_current(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		rc = vote(chg->usb_icl_votable, CTM_VOTER,
+						val->intval >= 0, val->intval);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int smb2_usb_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int smb2_init_usb_psy(struct smb2 *chip)
+{
+	struct power_supply_config usb_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	chg->usb_psy_desc.name			= "usb";
+	chg->usb_psy_desc.type			= POWER_SUPPLY_TYPE_UNKNOWN;
+	chg->usb_psy_desc.properties		= smb2_usb_props;
+	chg->usb_psy_desc.num_properties	= ARRAY_SIZE(smb2_usb_props);
+	chg->usb_psy_desc.get_property		= smb2_usb_get_prop;
+	chg->usb_psy_desc.set_property		= smb2_usb_set_prop;
+	chg->usb_psy_desc.property_is_writeable	= smb2_usb_prop_is_writeable;
+
+	usb_cfg.drv_data = chip;
+	usb_cfg.of_node = chg->dev->of_node;
+	chg->usb_psy = devm_power_supply_register(chg->dev,
+						  &chg->usb_psy_desc,
+						  &usb_cfg);
+	if (IS_ERR(chg->usb_psy)) {
+		pr_err("Couldn't register USB power supply\n");
+		return PTR_ERR(chg->usb_psy);
+	}
+
+	return 0;
+}
+
+/*****************************
+ * USB MAIN PSY REGISTRATION *
+ *****************************/
+
+static enum power_supply_property smb2_usb_main_props[] = {
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_ICL_REDUCTION,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+	POWER_SUPPLY_PROP_FCC_DELTA,
+	/*
+	 * TODO move the TEMP and TEMP_MAX properties here,
+	 * and update the thermal balancer to look here
+	 */
+};
+
+static int smb2_usb_main_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_ICL_REDUCTION:
+		val->intval = chg->icl_reduction_ua;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fcc,
+							&val->intval);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = POWER_SUPPLY_TYPE_MAIN;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+		rc = smblib_get_prop_input_current_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED:
+		rc = smblib_get_prop_input_voltage_settled(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_FCC_DELTA:
+		rc = smblib_get_prop_fcc_delta(chg, val);
+		break;
+	default:
+		pr_debug("get prop %d is not supported in usb-main\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_usb_main_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_ICL_REDUCTION:
+		rc = smblib_set_icl_reduction(chg, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", psp);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc usb_main_psy_desc = {
+	.name		= "main",
+	.type		= POWER_SUPPLY_TYPE_MAIN,
+	.properties	= smb2_usb_main_props,
+	.num_properties	= ARRAY_SIZE(smb2_usb_main_props),
+	.get_property	= smb2_usb_main_get_prop,
+	.set_property	= smb2_usb_main_set_prop,
+};
+
+static int smb2_init_usb_main_psy(struct smb2 *chip)
+{
+	struct power_supply_config usb_main_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	usb_main_cfg.drv_data = chip;
+	usb_main_cfg.of_node = chg->dev->of_node;
+	chg->usb_main_psy = devm_power_supply_register(chg->dev,
+						  &usb_main_psy_desc,
+						  &usb_main_cfg);
+	if (IS_ERR(chg->usb_main_psy)) {
+		pr_err("Couldn't register USB main power supply\n");
+		return PTR_ERR(chg->usb_main_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * DC PSY REGISTRATION   *
+ *************************/
+
+static enum power_supply_property smb2_dc_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smb2_dc_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_dc_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_dc_online(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_dc_current_max(chg, val);
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+	return 0;
+}
+
+static int smb2_dc_set_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		const union power_supply_propval *val)
+{
+	struct smb2 *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_prop_dc_current_max(chg, val);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb2_dc_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	int rc;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	return rc;
+}
+
+static const struct power_supply_desc dc_psy_desc = {
+	.name = "dc",
+	.type = POWER_SUPPLY_TYPE_WIPOWER,
+	.properties = smb2_dc_props,
+	.num_properties = ARRAY_SIZE(smb2_dc_props),
+	.get_property = smb2_dc_get_prop,
+	.set_property = smb2_dc_set_prop,
+	.property_is_writeable = smb2_dc_prop_is_writeable,
+};
+
+static int smb2_init_dc_psy(struct smb2 *chip)
+{
+	struct power_supply_config dc_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	dc_cfg.drv_data = chip;
+	dc_cfg.of_node = chg->dev->of_node;
+	chg->dc_psy = devm_power_supply_register(chg->dev,
+						  &dc_psy_desc,
+						  &dc_cfg);
+	if (IS_ERR(chg->dc_psy)) {
+		pr_err("Couldn't register USB power supply\n");
+		return PTR_ERR(chg->dc_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * BATT PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb2_batt_props[] = {
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+	POWER_SUPPLY_PROP_CHARGE_DONE,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
+	POWER_SUPPLY_PROP_RERUN_AICL,
+	POWER_SUPPLY_PROP_DP_DM,
+};
+
+static int smb2_batt_get_prop(struct power_supply *psy,
+		enum power_supply_property psp,
+		union power_supply_propval *val)
+{
+	struct smb_charger *chg = power_supply_get_drvdata(psy);
+	int rc = 0;
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_STATUS:
+		rc = smblib_get_prop_batt_status(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		rc = smblib_get_prop_batt_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_batt_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_get_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		rc = smblib_get_prop_system_temp_level(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smblib_get_prop_charger_temp(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		rc = smblib_get_prop_input_current_limited(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+		val->intval = chg->step_chg_enabled;
+		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_STEP:
+		rc = smblib_get_prop_step_chg_step(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_batt_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
+		val->intval = chg->qnovo_fv_uv;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = smblib_get_prop_batt_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
+		val->intval = chg->qnovo_fcc_ua;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		val->intval = get_client_vote(chg->fcc_votable,
+					      DEFAULT_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		rc = smblib_get_prop_batt_temp(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_DONE:
+		rc = smblib_get_prop_batt_charge_done(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+		val->intval = get_client_vote(chg->pl_disable_votable,
+					      USER_VOTER);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
+		rc = smblib_get_prop_die_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_DP_DM:
+		val->intval = chg->pulse_cnt;
+		break;
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+		val->intval = 0;
+		break;
+	default:
+		pr_err("batt power supply prop %d not supported\n", psp);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
+static int smb2_batt_set_prop(struct power_supply *psy,
+		enum power_supply_property prop,
+		const union power_supply_propval *val)
+{
+	int rc = 0;
+	struct smb_charger *chg = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_set_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		rc = smblib_set_prop_system_temp_level(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_set_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+		vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
+		chg->qnovo_fv_uv = val->intval;
+		rc = rerun_election(chg->fv_votable);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
+		chg->qnovo_fcc_ua = val->intval;
+		rc = rerun_election(chg->fcc_votable);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		vote(chg->fcc_votable, DEFAULT_VOTER, true, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		if (chg->pl.psy)
+			power_supply_set_property(chg->pl.psy,
+				POWER_SUPPLY_PROP_SET_SHIP_MODE, val);
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+		rc = smblib_rerun_aicl(chg);
+		break;
+	case POWER_SUPPLY_PROP_DP_DM:
+		rc = smblib_dp_dm(chg, val->intval);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb2_batt_prop_is_writeable(struct power_supply *psy,
+		enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+	case POWER_SUPPLY_PROP_CAPACITY:
+	case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+	case POWER_SUPPLY_PROP_DP_DM:
+	case POWER_SUPPLY_PROP_RERUN_AICL:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct power_supply_desc batt_psy_desc = {
+	.name = "battery",
+	.type = POWER_SUPPLY_TYPE_BATTERY,
+	.properties = smb2_batt_props,
+	.num_properties = ARRAY_SIZE(smb2_batt_props),
+	.get_property = smb2_batt_get_prop,
+	.set_property = smb2_batt_set_prop,
+	.property_is_writeable = smb2_batt_prop_is_writeable,
+};
+
+static int smb2_init_batt_psy(struct smb2 *chip)
+{
+	struct power_supply_config batt_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	batt_cfg.drv_data = chg;
+	batt_cfg.of_node = chg->dev->of_node;
+	chg->batt_psy = devm_power_supply_register(chg->dev,
+						   &batt_psy_desc,
+						   &batt_cfg);
+	if (IS_ERR(chg->batt_psy)) {
+		pr_err("Couldn't register battery power supply\n");
+		return PTR_ERR(chg->batt_psy);
+	}
+
+	return rc;
+}
+
+/******************************
+ * VBUS REGULATOR REGISTRATION *
+ ******************************/
+
+struct regulator_ops smb2_vbus_reg_ops = {
+	.enable = smblib_vbus_regulator_enable,
+	.disable = smblib_vbus_regulator_disable,
+	.is_enabled = smblib_vbus_regulator_is_enabled,
+};
+
+static int smb2_init_vbus_regulator(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vbus_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vbus_vreg),
+				      GFP_KERNEL);
+	if (!chg->vbus_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vbus_vreg->rdesc.owner = THIS_MODULE;
+	chg->vbus_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vbus_vreg->rdesc.ops = &smb2_vbus_reg_ops;
+	chg->vbus_vreg->rdesc.of_match = "qcom,smb2-vbus";
+	chg->vbus_vreg->rdesc.name = "qcom,smb2-vbus";
+
+	chg->vbus_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vbus_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vbus_vreg->rdev)) {
+		rc = PTR_ERR(chg->vbus_vreg->rdev);
+		chg->vbus_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VBUS regualtor rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/******************************
+ * VCONN REGULATOR REGISTRATION *
+ ******************************/
+
+struct regulator_ops smb2_vconn_reg_ops = {
+	.enable = smblib_vconn_regulator_enable,
+	.disable = smblib_vconn_regulator_disable,
+	.is_enabled = smblib_vconn_regulator_is_enabled,
+};
+
+static int smb2_init_vconn_regulator(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
+				      GFP_KERNEL);
+	if (!chg->vconn_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vconn_vreg->rdesc.owner = THIS_MODULE;
+	chg->vconn_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vconn_vreg->rdesc.ops = &smb2_vconn_reg_ops;
+	chg->vconn_vreg->rdesc.of_match = "qcom,smb2-vconn";
+	chg->vconn_vreg->rdesc.name = "qcom,smb2-vconn";
+
+	chg->vconn_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vconn_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vconn_vreg->rdev)) {
+		rc = PTR_ERR(chg->vconn_vreg->rdev);
+		chg->vconn_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VCONN regualtor rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+static int smb2_config_step_charging(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+	int i;
+
+	if (!chg->step_chg_enabled)
+		return rc;
+
+	for (i = 0; i < STEP_CHARGING_MAX_STEPS - 1; i++) {
+		rc = smblib_set_charge_param(chg,
+					     &chg->param.step_soc_threshold[i],
+					     chip->dt.step_soc_threshold[i]);
+		if (rc < 0) {
+			pr_err("Couldn't configure soc thresholds rc = %d\n",
+				rc);
+			goto err_out;
+		}
+	}
+
+	for (i = 0; i < STEP_CHARGING_MAX_STEPS; i++) {
+		rc = smblib_set_charge_param(chg, &chg->param.step_cc_delta[i],
+					     chip->dt.step_cc_delta[i]);
+		if (rc < 0) {
+			pr_err("Couldn't configure cc delta rc = %d\n",
+				rc);
+			goto err_out;
+		}
+	}
+
+	rc = smblib_write(chg, STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG,
+			  STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure soc request timeout reg rc=%d\n",
+			 rc);
+		goto err_out;
+	}
+
+	rc = smblib_write(chg, STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG,
+			  STEP_CHG_UPDATE_FAIL_TIMEOUT_120S);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure soc fail timeout reg rc=%d\n",
+			rc);
+		goto err_out;
+	}
+
+	/*
+	 *  enable step charging, source soc, standard mode, go to final
+	 *  state in case of failure.
+	 */
+	rc = smblib_write(chg, CHGR_STEP_CHG_MODE_CFG_REG,
+			       STEP_CHARGING_ENABLE_BIT |
+			       STEP_CHARGING_SOURCE_SELECT_BIT |
+			       STEP_CHARGING_SOC_FAIL_OPTION_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
+		goto err_out;
+	}
+
+	return 0;
+err_out:
+	chg->step_chg_enabled = false;
+	return rc;
+}
+
+static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
+{
+	int rc;
+	int ua;
+	struct smb_charger *chg = &chip->chg;
+	s64 nw = (s64)uw * 1000;
+
+	if (uw < 0)
+		return 0;
+
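+	/*
+	 * 1 mV * 1 uA = 1 nW, so the current limit for each input voltage
+	 * zone is the power budget in nW divided by that zone's maximum
+	 * voltage in mV.
+	 */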
+	ua = div_s64(nw, ZIN_ICL_PT_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_pt_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_PT_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_pt_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_LV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_MID_LV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_mid_lv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_mid_lv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_MID_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_mid_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_mid_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	ua = div_s64(nw, ZIN_ICL_HV_MAX_MV);
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_hv, ua);
+	if (rc < 0) {
+		pr_err("Couldn't configure dc_icl_div2_hv rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb2_configure_typec(struct smb_charger *chg)
+{
+	int rc;
+
+	/*
+	 * trigger the usb-typec-change interrupt only when the CC state
+	 * changes
+	 */
+	rc = smblib_write(chg, TYPE_C_INTRPT_ENB_REG,
+			  TYPEC_CCSTATE_CHANGE_INT_EN_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure power role for dual-role */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure power role for DRP rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * disable Type-C factory mode and stay in Attached.SRC state when VCONN
+	 * over-current happens
+	 */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			FACTORY_MODE_DETECTION_EN_BIT | VCONN_OC_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure Type-C rc=%d\n", rc);
+		return rc;
+	}
+
+	/* increase VCONN softstart */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_2_REG,
+			VCONN_SOFTSTART_CFG_MASK, VCONN_SOFTSTART_CFG_MASK);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't increase VCONN softstart rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* disable try.SINK mode */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't set TRYSINK_MODE rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb2_disable_typec(struct smb_charger *chg)
+{
+	int rc;
+
+	/* configure FSM in idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't put FSM in idle rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure micro USB mode */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			TYPE_C_OR_U_USB_BIT, TYPE_C_OR_U_USB_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable micro USB mode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* release FSM from idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't release FSM rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb2_init_hw(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+	u8 stat;
+
+	if (chip->dt.no_battery)
+		chg->fake_capacity = 50;
+
+	if (chip->dt.fcc_ua < 0)
+		smblib_get_charge_param(chg, &chg->param.fcc, &chip->dt.fcc_ua);
+
+	if (chip->dt.fv_uv < 0)
+		smblib_get_charge_param(chg, &chg->param.fv, &chip->dt.fv_uv);
+
+	smblib_get_charge_param(chg, &chg->param.usb_icl,
+				&chg->default_icl_ua);
+	if (chip->dt.usb_icl_ua < 0)
+		chip->dt.usb_icl_ua = chg->default_icl_ua;
+
+	if (chip->dt.dc_icl_ua < 0)
+		smblib_get_charge_param(chg, &chg->param.dc_icl,
+					&chip->dt.dc_icl_ua);
+
+	/* set a slower soft start setting for OTG */
+	rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG2_REG,
+				ENG_SSUPPLY_IVREF_OTG_SS_MASK, OTG_SS_SLOW);
+	if (rc < 0) {
+		pr_err("Couldn't set otg soft start rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set OTG current limit */
+	rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+							chip->dt.otg_cl_ua);
+	if (rc < 0) {
+		pr_err("Couldn't set otg current limit rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->boost_threshold_ua = chip->dt.boost_threshold_ua;
+
+	rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
+	if (rc < 0) {
+		pr_err("Couldn't read APSD_RESULT_STATUS rc=%d\n", rc);
+		return rc;
+	}
+
+	smblib_rerun_apsd_if_required(chg);
+
+	/* clear the ICL override if it is set */
+	rc = smblib_icl_override(chg, false);
+	if (rc < 0) {
+		pr_err("Couldn't disable ICL override rc=%d\n", rc);
+		return rc;
+	}
+
+	/* votes must be cast before configuring software control */
+	/* vote 0mA on usb_icl for non battery platforms */
+	vote(chg->usb_icl_votable,
+		DEFAULT_VOTER, chip->dt.no_battery, 0);
+	vote(chg->dc_suspend_votable,
+		DEFAULT_VOTER, chip->dt.no_battery, 0);
+	vote(chg->fcc_votable,
+		DEFAULT_VOTER, true, chip->dt.fcc_ua);
+	vote(chg->fv_votable,
+		DEFAULT_VOTER, true, chip->dt.fv_uv);
+	vote(chg->dc_icl_votable,
+		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
+	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
+		chip->dt.hvdcp_disable, 0);
+	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+			true, 0);
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
+			true, 0);
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+			true, 0);
+	vote(chg->pd_disallowed_votable_indirect, MICRO_USB_VOTER,
+			chg->micro_usb_mode, 0);
+	vote(chg->hvdcp_enable_votable, MICRO_USB_VOTER,
+			chg->micro_usb_mode, 0);
+
+	/*
+	 * AICL configuration:
+	 * start from min and AICL ADC disable
+	 */
+	rc = smblib_masked_write(chg, USBIN_AICL_OPTIONS_CFG_REG,
+			USBIN_AICL_START_AT_MAX_BIT
+				| USBIN_AICL_ADC_EN_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure AICL rc=%d\n", rc);
+		return rc;
+	}
+
+	/* Configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT |
+				 CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chg->micro_usb_mode)
+		rc = smb2_disable_typec(chg);
+	else
+		rc = smb2_configure_typec(chg);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VCONN for software control */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+				 VCONN_EN_SRC_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure VCONN for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VBUS for software control */
+	rc = smblib_masked_write(chg, OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't configure VBUS for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
+			QNOVO_PT_ENABLE_CMD_BIT, QNOVO_PT_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure step charging */
+	rc = smb2_config_step_charging(chip);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure step charging rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* configure wipower watts */
+	rc = smb2_config_wipower_input_power(chip, chip->dt.wipower_max_uw);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure wipower rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable SW STAT override */
+	rc = smblib_masked_write(chg, STAT_CFG_REG,
+				 STAT_SW_OVERRIDE_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't disable SW STAT override rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* configure float charger options */
+	switch (chip->dt.float_option) {
+	case 1:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, 0);
+		break;
+	case 2:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
+		break;
+	case 3:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
+		break;
+	case 4:
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	switch (chip->dt.chg_inhibit_thr_mv) {
+	case 50:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_50MV);
+		break;
+	case 100:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_100MV);
+		break;
+	case 200:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_200MV);
+		break;
+	case 300:
+		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+				CHARGE_INHIBIT_THRESHOLD_MASK,
+				CHARGE_INHIBIT_THRESHOLD_300MV);
+		break;
+	case 0:
+		rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				CHARGER_INHIBIT_BIT, 0);
+		break;
+	default:
+		break;
+	}
+
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure charge inhibit threshold rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (chip->dt.auto_recharge_soc) {
+		rc = smblib_masked_write(chg, FG_UPDATE_CFG_2_SEL_REG,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT |
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT,
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure FG_UPDATE_CFG2_SEL_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		rc = smblib_masked_write(chg, FG_UPDATE_CFG_2_SEL_REG,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT |
+				VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT,
+				SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT);
+		if (rc < 0) {
+			dev_err(chg->dev, "Couldn't configure FG_UPDATE_CFG2_SEL_REG rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int smb2_chg_config_init(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct pmic_revid_data *pmic_rev_id;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+					  "qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR_OR_NULL(pmic_rev_id)) {
+		/*
+		 * The revid peripheral must be registered; a failure here
+		 * only indicates that the revid module has not probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	switch (pmic_rev_id->pmic_subtype) {
+	case PMI8998_SUBTYPE:
+		chip->chg.smb_version = PMI8998_SUBTYPE;
+		chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT;
+		if (pmic_rev_id->rev4 == PMI8998_V1P1_REV4) /* PMI rev 1.1 */
+			chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
+		if (pmic_rev_id->rev4 == PMI8998_V2P0_REV4) /* PMI rev 2.0 */
+			chg->wa_flags |= TYPEC_CC2_REMOVAL_WA_BIT;
+		chg->chg_freq.freq_5V		= 600;
+		chg->chg_freq.freq_6V_8V	= 800;
+		chg->chg_freq.freq_9V		= 1000;
+		chg->chg_freq.freq_12V		= 1200;
+		chg->chg_freq.freq_removal	= 1000;
+		chg->chg_freq.freq_below_otg_threshold = 2000;
+		chg->chg_freq.freq_above_otg_threshold = 800;
+		break;
+	case PM660_SUBTYPE:
+		chip->chg.smb_version = PM660_SUBTYPE;
+		chip->chg.wa_flags |= BOOST_BACK_WA;
+		chg->param.freq_buck = pm660_params.freq_buck;
+		chg->param.freq_boost = pm660_params.freq_boost;
+		chg->chg_freq.freq_5V		= 600;
+		chg->chg_freq.freq_6V_8V	= 800;
+		chg->chg_freq.freq_9V		= 1050;
+		chg->chg_freq.freq_12V		= 1200;
+		chg->chg_freq.freq_removal	= 1050;
+		chg->chg_freq.freq_below_otg_threshold = 1600;
+		chg->chg_freq.freq_above_otg_threshold = 800;
+		break;
+	default:
+		pr_err("PMIC subtype %d not supported\n",
+				pmic_rev_id->pmic_subtype);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/****************************
+ * DETERMINE INITIAL STATUS *
+ ****************************/
+
+static int smb2_determine_initial_status(struct smb2 *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+	struct smb_charger *chg = &chip->chg;
+
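+	/*
+	 * Invoke the relevant interrupt handlers once by hand so the software
+	 * state matches whatever the hardware reports at probe time, before
+	 * any real interrupts are delivered.
+	 */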
+	if (chg->bms_psy)
+		smblib_suspend_on_debug_battery(chg);
+	smblib_handle_usb_plugin(0, &irq_data);
+	smblib_handle_usb_typec_change(0, &irq_data);
+	smblib_handle_usb_source_change(0, &irq_data);
+	smblib_handle_chg_state_change(0, &irq_data);
+	smblib_handle_icl_change(0, &irq_data);
+	smblib_handle_step_chg_state_change(0, &irq_data);
+	smblib_handle_step_chg_soc_update_request(0, &irq_data);
+
+	return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+
+static struct smb_irq_info smb2_irqs[] = {
+/* CHARGER IRQs */
+	[CHG_ERROR_IRQ] = {
+		.name		= "chg-error",
+		.handler	= smblib_handle_debug,
+	},
+	[CHG_STATE_CHANGE_IRQ] = {
+		.name		= "chg-state-change",
+		.handler	= smblib_handle_chg_state_change,
+		.wake		= true,
+	},
+	[STEP_CHG_STATE_CHANGE_IRQ] = {
+		.name		= "step-chg-state-change",
+		.handler	= smblib_handle_step_chg_state_change,
+		.wake		= true,
+	},
+	[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
+		.name		= "step-chg-soc-update-fail",
+		.handler	= smblib_handle_step_chg_soc_update_fail,
+		.wake		= true,
+	},
+	[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
+		.name		= "step-chg-soc-update-request",
+		.handler	= smblib_handle_step_chg_soc_update_request,
+		.wake		= true,
+	},
+/* OTG IRQs */
+	[OTG_FAIL_IRQ] = {
+		.name		= "otg-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OVERCURRENT_IRQ] = {
+		.name		= "otg-overcurrent",
+		.handler	= smblib_handle_otg_overcurrent,
+	},
+	[OTG_OC_DIS_SW_STS_IRQ] = {
+		.name		= "otg-oc-dis-sw-sts",
+		.handler	= smblib_handle_debug,
+	},
+	[TESTMODE_CHANGE_DET_IRQ] = {
+		.name		= "testmode-change-detect",
+		.handler	= smblib_handle_debug,
+	},
+/* BATTERY IRQs */
+	[BATT_TEMP_IRQ] = {
+		.name		= "bat-temp",
+		.handler	= smblib_handle_batt_temp_changed,
+	},
+	[BATT_OCP_IRQ] = {
+		.name		= "bat-ocp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OV_IRQ] = {
+		.name		= "bat-ov",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_LOW_IRQ] = {
+		.name		= "bat-low",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_THERM_ID_MISS_IRQ] = {
+		.name		= "bat-therm-or-id-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_TERM_MISS_IRQ] = {
+		.name		= "bat-terminal-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+/* USB INPUT IRQs */
+	[USBIN_COLLAPSE_IRQ] = {
+		.name		= "usbin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_LT_3P6V_IRQ] = {
+		.name		= "usbin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_UV_IRQ] = {
+		.name		= "usbin-uv",
+		.handler	= smblib_handle_usbin_uv,
+	},
+	[USBIN_OV_IRQ] = {
+		.name		= "usbin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_PLUGIN_IRQ] = {
+		.name		= "usbin-plugin",
+		.handler	= smblib_handle_usb_plugin,
+		.wake		= true,
+	},
+	[USBIN_SRC_CHANGE_IRQ] = {
+		.name		= "usbin-src-change",
+		.handler	= smblib_handle_usb_source_change,
+		.wake		= true,
+	},
+	[USBIN_ICL_CHANGE_IRQ] = {
+		.name		= "usbin-icl-change",
+		.handler	= smblib_handle_icl_change,
+		.wake		= true,
+	},
+	[TYPE_C_CHANGE_IRQ] = {
+		.name		= "type-c-change",
+		.handler	= smblib_handle_usb_typec_change,
+		.wake		= true,
+	},
+/* DC INPUT IRQs */
+	[DCIN_COLLAPSE_IRQ] = {
+		.name		= "dcin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_LT_3P6V_IRQ] = {
+		.name		= "dcin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_UV_IRQ] = {
+		.name		= "dcin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_OV_IRQ] = {
+		.name		= "dcin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_PLUGIN_IRQ] = {
+		.name		= "dcin-plugin",
+		.handler	= smblib_handle_dc_plugin,
+		.wake		= true,
+	},
+	[DIV2_EN_DG_IRQ] = {
+		.name		= "div2-en-dg",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_ICL_CHANGE_IRQ] = {
+		.name		= "dcin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+/* MISCELLANEOUS IRQs */
+	[WDOG_SNARL_IRQ] = {
+		.name		= "wdog-snarl",
+		.handler	= NULL,
+	},
+	[WDOG_BARK_IRQ] = {
+		.name		= "wdog-bark",
+		.handler	= NULL,
+	},
+	[AICL_FAIL_IRQ] = {
+		.name		= "aicl-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[AICL_DONE_IRQ] = {
+		.name		= "aicl-done",
+		.handler	= smblib_handle_debug,
+	},
+	[HIGH_DUTY_CYCLE_IRQ] = {
+		.name		= "high-duty-cycle",
+		.handler	= smblib_handle_high_duty_cycle,
+		.wake		= true,
+	},
+	[INPUT_CURRENT_LIMIT_IRQ] = {
+		.name		= "input-current-limiting",
+		.handler	= smblib_handle_debug,
+	},
+	[TEMPERATURE_CHANGE_IRQ] = {
+		.name		= "temperature-change",
+		.handler	= smblib_handle_debug,
+	},
+	[SWITCH_POWER_OK_IRQ] = {
+		.name		= "switcher-power-ok",
+		.handler	= smblib_handle_switcher_power_ok,
+		.storm_data	= {true, 1000, 3},
+	},
+};
+
+static int smb2_get_irq_index_byname(const char *irq_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) {
+		if (strcmp(smb2_irqs[i].name, irq_name) == 0)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int smb2_request_interrupt(struct smb2 *chip,
+				struct device_node *node, const char *irq_name)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc, irq, irq_index;
+	struct smb_irq_data *irq_data;
+
+	irq = of_irq_get_byname(node, irq_name);
+	if (irq < 0) {
+		pr_err("Couldn't get irq %s byname\n", irq_name);
+		return irq;
+	}
+
+	irq_index = smb2_get_irq_index_byname(irq_name);
+	if (irq_index < 0) {
+		pr_err("%s is not a defined irq\n", irq_name);
+		return irq_index;
+	}
+
+	if (!smb2_irqs[irq_index].handler)
+		return 0;
+
+	irq_data = devm_kzalloc(chg->dev, sizeof(*irq_data), GFP_KERNEL);
+	if (!irq_data)
+		return -ENOMEM;
+
+	irq_data->parent_data = chip;
+	irq_data->name = irq_name;
+	irq_data->storm_data = smb2_irqs[irq_index].storm_data;
+	mutex_init(&irq_data->storm_data.storm_lock);
+
+	rc = devm_request_threaded_irq(chg->dev, irq, NULL,
+					smb2_irqs[irq_index].handler,
+					IRQF_ONESHOT, irq_name, irq_data);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d\n", irq);
+		return rc;
+	}
+
+	smb2_irqs[irq_index].irq = irq;
+	smb2_irqs[irq_index].irq_data = irq_data;
+	if (smb2_irqs[irq_index].wake)
+		enable_irq_wake(irq);
+
+	return rc;
+}
+
+static int smb2_request_interrupts(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	struct device_node *child;
+	int rc = 0;
+	const char *name;
+	struct property *prop;
+
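+	/* request every IRQ named in each child node's "interrupt-names" */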
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names",
+					    prop, name) {
+			rc = smb2_request_interrupt(chip, child, name);
+			if (rc < 0)
+				return rc;
+		}
+	}
+
+	return rc;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int force_batt_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->batt_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_batt_psy_update_ops, NULL,
+			force_batt_psy_update_write, "0x%02llx\n");
+
+static int force_usb_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->usb_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_usb_psy_update_ops, NULL,
+			force_usb_psy_update_write, "0x%02llx\n");
+
+static int force_dc_psy_update_write(void *data, u64 val)
+{
+	struct smb_charger *chg = data;
+
+	power_supply_changed(chg->dc_psy);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_dc_psy_update_ops, NULL,
+			force_dc_psy_update_write, "0x%02llx\n");
+
+static void smb2_create_debugfs(struct smb2 *chip)
+{
+	struct dentry *file;
+
+	chip->dfs_root = debugfs_create_dir("charger", NULL);
+	if (IS_ERR_OR_NULL(chip->dfs_root)) {
+		pr_err("Couldn't create charger debugfs rc=%ld\n",
+			(long)chip->dfs_root);
+		return;
+	}
+
+	file = debugfs_create_file("force_batt_psy_update", 0600,
+			    chip->dfs_root, chip, &force_batt_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_batt_psy_update file rc=%ld\n",
+			(long)file);
+
+	file = debugfs_create_file("force_usb_psy_update", 0600,
+			    chip->dfs_root, chip, &force_usb_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_usb_psy_update file rc=%ld\n",
+			(long)file);
+
+	file = debugfs_create_file("force_dc_psy_update", 0600,
+			    chip->dfs_root, chip, &force_dc_psy_update_ops);
+	if (IS_ERR_OR_NULL(file))
+		pr_err("Couldn't create force_dc_psy_update file rc=%ld\n",
+			(long)file);
+}
+
+#else
+
+static void smb2_create_debugfs(struct smb2 *chip)
+{}
+
+#endif
+
+static int smb2_probe(struct platform_device *pdev)
+{
+	struct smb2 *chip;
+	struct smb_charger *chg;
+	int rc = 0;
+	union power_supply_propval val;
+	int usb_present, batt_present, batt_health, batt_charge_type;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chg = &chip->chg;
+	chg->dev = &pdev->dev;
+	chg->param = v1_params;
+	chg->debug_mask = &__debug_mask;
+	chg->mode = PARALLEL_MASTER;
+	chg->irq_info = smb2_irqs;
+	chg->name = "PMI";
+
+	chg->regmap = dev_get_regmap(chg->dev->parent, NULL);
+	if (!chg->regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	rc = smb2_chg_config_init(chip);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't setup chg_config rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Smblib_init failed rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	/* set driver data before the resources below look it up */
+	platform_set_drvdata(pdev, chip);
+
+	rc = smb2_init_vbus_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vbus regulator rc=%d\n",
+			rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_vconn_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vconn regulator rc=%d\n",
+			rc);
+		goto cleanup;
+	}
+
+	/* extcon registration */
+	chg->extcon = devm_extcon_dev_allocate(chg->dev, smblib_extcon_cable);
+	if (IS_ERR(chg->extcon)) {
+		rc = PTR_ERR(chg->extcon);
+		dev_err(chg->dev, "failed to allocate extcon device rc=%d\n",
+				rc);
+		goto cleanup;
+	}
+
+	rc = devm_extcon_dev_register(chg->dev, chg->extcon);
+	if (rc < 0) {
+		dev_err(chg->dev, "failed to register extcon device rc=%d\n",
+				rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_dc_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize dc psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_usb_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_usb_main_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb main psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_init_batt_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize batt psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb2_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n",
+			rc);
+		goto cleanup;
+	}
+
+	rc = smb2_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	smb2_create_debugfs(chip);
+
+	rc = smblib_get_prop_usb_present(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get usb present rc=%d\n", rc);
+		goto cleanup;
+	}
+	usb_present = val.intval;
+
+	rc = smblib_get_prop_batt_present(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt present rc=%d\n", rc);
+		goto cleanup;
+	}
+	batt_present = val.intval;
+
+	rc = smblib_get_prop_batt_health(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt health rc=%d\n", rc);
+		goto cleanup;
+	}
+	batt_health = val.intval;
+
+	rc = smblib_get_prop_batt_charge_type(chg, &val);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		goto cleanup;
+	}
+	batt_charge_type = val.intval;
+
+	pr_info("QPNP SMB2 probed successfully usb: present=%d type=%d batt: present=%d health=%d charge=%d\n",
+		usb_present, chg->usb_psy_desc.type,
+		batt_present, batt_health, batt_charge_type);
+	return rc;
+
+cleanup:
+	smblib_deinit(chg);
+	if (chg->usb_psy)
+		power_supply_unregister(chg->usb_psy);
+	if (chg->batt_psy)
+		power_supply_unregister(chg->batt_psy);
+	if (chg->vconn_vreg && chg->vconn_vreg->rdev)
+		regulator_unregister(chg->vconn_vreg->rdev);
+	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+		regulator_unregister(chg->vbus_vreg->rdev);
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int smb2_remove(struct platform_device *pdev)
+{
+	struct smb2 *chip = platform_get_drvdata(pdev);
+	struct smb_charger *chg = &chip->chg;
+
+	power_supply_unregister(chg->batt_psy);
+	power_supply_unregister(chg->usb_psy);
+	regulator_unregister(chg->vconn_vreg->rdev);
+	regulator_unregister(chg->vbus_vreg->rdev);
+
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static void smb2_shutdown(struct platform_device *pdev)
+{
+	struct smb2 *chip = platform_get_drvdata(pdev);
+	struct smb_charger *chg = &chip->chg;
+
+	/* configure power role for UFP */
+	smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				TYPEC_POWER_ROLE_CMD_MASK, UFP_EN_CMD_BIT);
+
+	/* force HVDCP to 5V */
+	smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, 0);
+	smblib_write(chg, CMD_HVDCP_2_REG, FORCE_5V_BIT);
+
+	/* force enable APSD */
+	smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				 AUTO_SRC_DETECT_BIT, AUTO_SRC_DETECT_BIT);
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,qpnp-smb2", },
+	{ },
+};
+
+static struct platform_driver smb2_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-smb2",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe		= smb2_probe,
+	.remove		= smb2_remove,
+	.shutdown	= smb2_shutdown,
+};
+module_platform_driver(smb2_driver);
+
+MODULE_DESCRIPTION("QPNP SMB2 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
new file mode 100644
index 0000000..eb6727b
--- /dev/null
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -0,0 +1,4290 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/iio/consumer.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/driver.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/irq.h>
+#include "smb-lib.h"
+#include "smb-reg.h"
+#include "storm-watch.h"
+#include "pmic-voter.h"
+
+#define smblib_err(chg, fmt, ...)		\
+	pr_err("%s: %s: " fmt, chg->name,	\
+		__func__, ##__VA_ARGS__)	\
+
+#define smblib_dbg(chg, reason, fmt, ...)			\
+	do {							\
+		if (*chg->debug_mask & (reason))		\
+			pr_info("%s: %s: " fmt, chg->name,	\
+				__func__, ##__VA_ARGS__);	\
+		else						\
+			pr_debug("%s: %s: " fmt, chg->name,	\
+				__func__, ##__VA_ARGS__);	\
+	} while (0)
+
+static bool is_secure(struct smb_charger *chg, int addr)
+{
+	if (addr == SHIP_MODE_REG || addr == FREQ_CLK_DIV_REG)
+		return true;
+	/* assume everything above 0xA0 is secure */
+	return (bool)((addr & 0xFF) >= 0xA0);
+}
+
+int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
+{
+	unsigned int temp;
+	int rc = 0;
+
+	rc = regmap_read(chg->regmap, addr, &temp);
+	if (rc >= 0)
+		*val = (u8)temp;
+
+	return rc;
+}
+
+int smblib_multibyte_read(struct smb_charger *chg, u16 addr, u8 *val,
+				int count)
+{
+	return regmap_bulk_read(chg->regmap, addr, val, count);
+}
+
+int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chg->write_lock);
+	if (is_secure(chg, addr)) {
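+		/* unlock secure access by writing 0xA5 to peripheral offset 0xD0 */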
+		rc = regmap_write(chg->regmap, (addr & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_update_bits(chg->regmap, addr, mask, val);
+
+unlock:
+	mutex_unlock(&chg->write_lock);
+	return rc;
+}
+
+int smblib_write(struct smb_charger *chg, u16 addr, u8 val)
+{
+	int rc = 0;
+
+	mutex_lock(&chg->write_lock);
+
+	if (is_secure(chg, addr)) {
+		rc = regmap_write(chg->regmap, (addr & ~(0xFF)) | 0xD0, 0xA5);
+		if (rc < 0)
+			goto unlock;
+	}
+
+	rc = regmap_write(chg->regmap, addr, val);
+
+unlock:
+	mutex_unlock(&chg->write_lock);
+	return rc;
+}
+
+static int smblib_get_step_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
+{
+	int rc, step_state;
+	u8 stat;
+
+	if (!chg->step_chg_enabled) {
+		*cc_delta_ua = 0;
+		return 0;
+	}
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	step_state = (stat & STEP_CHARGING_STATUS_MASK) >>
+				STEP_CHARGING_STATUS_SHIFT;
+	rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
+				     cc_delta_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
+{
+	int rc, cc_minus_ua;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (!(stat & BAT_TEMP_STATUS_SOFT_LIMIT_MASK)) {
+		*cc_delta_ua = 0;
+		return 0;
+	}
+
+	rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp,
+				     &cc_minus_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n", rc);
+		return rc;
+	}
+
+	*cc_delta_ua = -cc_minus_ua;
+	return 0;
+}
+
+int smblib_icl_override(struct smb_charger *chg, bool override)
+{
+	int rc;
+	bool override_status;
+	u8 stat;
+	u16 reg;
+
+	switch (chg->smb_version) {
+	case PMI8998_SUBTYPE:
+		reg = APSD_RESULT_STATUS_REG;
+		break;
+	case PM660_SUBTYPE:
+		reg = AICL_STATUS_REG;
+		break;
+	default:
+		smblib_dbg(chg, PR_MISC, "Unknown chip version=%x\n",
+				chg->smb_version);
+		return -EINVAL;
+	}
+
+	rc = smblib_read(chg, reg, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read reg=%x rc=%d\n", reg, rc);
+		return rc;
+	}
+	override_status = (bool)(stat & ICL_OVERRIDE_LATCH_BIT);
+
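+	/* issue the override command only when the latched state differs */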
+	if (override != override_status) {
+		rc = smblib_masked_write(chg, CMD_APSD_REG,
+				ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+/********************
+ * REGISTER GETTERS *
+ ********************/
+
+int smblib_get_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int *val_u)
+{
+	int rc = 0;
+	u8 val_raw;
+
+	rc = smblib_read(chg, param->reg, &val_raw);
+	if (rc < 0) {
+		smblib_err(chg, "%s: Couldn't read from 0x%04x rc=%d\n",
+			param->name, param->reg, rc);
+		return rc;
+	}
+
+	if (param->get_proc)
+		*val_u = param->get_proc(param, val_raw);
+	else
+		*val_u = val_raw * param->step_u + param->min_u;
+	smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
+		   param->name, *val_u, val_raw);
+
+	return rc;
+}
+
+int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend)
+{
+	int rc = 0;
+	u8 temp;
+
+	rc = smblib_read(chg, USBIN_CMD_IL_REG, &temp);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USBIN_CMD_IL rc=%d\n", rc);
+		return rc;
+	}
+	*suspend = temp & USBIN_SUSPEND_BIT;
+
+	return rc;
+}
+
+struct apsd_result {
+	const char * const name;
+	const u8 bit;
+	const enum power_supply_type pst;
+};
+
+enum {
+	UNKNOWN,
+	SDP,
+	CDP,
+	DCP,
+	OCP,
+	FLOAT,
+	HVDCP2,
+	HVDCP3,
+	MAX_TYPES
+};
+
+static const struct apsd_result smblib_apsd_results[] = {
+	[UNKNOWN] = {
+		.name	= "UNKNOWN",
+		.bit	= 0,
+		.pst	= POWER_SUPPLY_TYPE_UNKNOWN
+	},
+	[SDP] = {
+		.name	= "SDP",
+		.bit	= SDP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB
+	},
+	[CDP] = {
+		.name	= "CDP",
+		.bit	= CDP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_CDP
+	},
+	[DCP] = {
+		.name	= "DCP",
+		.bit	= DCP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_DCP
+	},
+	[OCP] = {
+		.name	= "OCP",
+		.bit	= OCP_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_DCP
+	},
+	[FLOAT] = {
+		.name	= "FLOAT",
+		.bit	= FLOAT_CHARGER_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_DCP
+	},
+	[HVDCP2] = {
+		.name	= "HVDCP2",
+		.bit	= DCP_CHARGER_BIT | QC_2P0_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_HVDCP
+	},
+	[HVDCP3] = {
+		.name	= "HVDCP3",
+		.bit	= DCP_CHARGER_BIT | QC_3P0_BIT,
+		.pst	= POWER_SUPPLY_TYPE_USB_HVDCP_3,
+	},
+};
+
+static const struct apsd_result *smblib_get_apsd_result(struct smb_charger *chg)
+{
+	int rc, i;
+	u8 apsd_stat, stat;
+	const struct apsd_result *result = &smblib_apsd_results[UNKNOWN];
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &apsd_stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return result;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", apsd_stat);
+
+	if (!(apsd_stat & APSD_DTC_STATUS_DONE_BIT))
+		return result;
+
+	rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_RESULT_STATUS rc=%d\n",
+			rc);
+		return result;
+	}
+	stat &= APSD_RESULT_STATUS_MASK;
+
+	for (i = 0; i < ARRAY_SIZE(smblib_apsd_results); i++) {
+		if (smblib_apsd_results[i].bit == stat)
+			result = &smblib_apsd_results[i];
+	}
+
+	if (apsd_stat & QC_CHARGER_BIT) {
+		/* since it's a QC charger, return either HVDCP3 or HVDCP2 */
+		if (result != &smblib_apsd_results[HVDCP3])
+			result = &smblib_apsd_results[HVDCP2];
+	}
+
+	return result;
+}
+
+/********************
+ * REGISTER SETTERS *
+ ********************/
+
+static int chg_freq_list[] = {
+	9600, 9600, 6400, 4800, 3800, 3200, 2700, 2400, 2100, 1900, 1700,
+	1600, 1500, 1400, 1300, 1200,
+};
+
+int smblib_set_chg_freq(struct smb_chg_param *param,
+				int val_u, u8 *val_raw)
+{
+	u8 i;
+
+	if (val_u > param->max_u || val_u < param->min_u)
+		return -EINVAL;
+
+	/* Charger FSW is the configured frequency / 2 */
+	val_u *= 2;
+	for (i = 0; i < ARRAY_SIZE(chg_freq_list); i++) {
+		if (chg_freq_list[i] == val_u)
+			break;
+	}
+	if (i == ARRAY_SIZE(chg_freq_list)) {
+		pr_err("Invalid frequency %d Hz\n", val_u / 2);
+		return -EINVAL;
+	}
+
+	*val_raw = i;
+
+	return 0;
+}
+
+static int smblib_set_opt_freq_buck(struct smb_charger *chg, int fsw_khz)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, fsw_khz);
+	if (rc < 0)
+		dev_err(chg->dev, "Error in setting freq_buck rc=%d\n", rc);
+
+	if (chg->mode == PARALLEL_MASTER && chg->pl.psy) {
+		pval.intval = fsw_khz;
+		/*
+		 * Some parallel charging implementations may not have
+		 * PROP_BUCK_FREQ property - they could be running
+		 * with a fixed frequency
+		 */
+		power_supply_set_property(chg->pl.psy,
+				POWER_SUPPLY_PROP_BUCK_FREQ, &pval);
+	}
+
+	return rc;
+}
+
+int smblib_set_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int val_u)
+{
+	int rc = 0;
+	u8 val_raw;
+
+	if (param->set_proc) {
+		rc = param->set_proc(param, val_u, &val_raw);
+		if (rc < 0)
+			return -EINVAL;
+	} else {
+		if (val_u > param->max_u || val_u < param->min_u) {
+			smblib_err(chg, "%s: %d is out of range [%d, %d]\n",
+				param->name, val_u, param->min_u, param->max_u);
+			return -EINVAL;
+		}
+
+		val_raw = (val_u - param->min_u) / param->step_u;
+	}
+
+	rc = smblib_write(chg, param->reg, val_raw);
+	if (rc < 0) {
+		smblib_err(chg, "%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
+			param->name, val_raw, param->reg, rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_REGISTER, "%s = %d (0x%02x)\n",
+		   param->name, val_u, val_raw);
+
+	return rc;
+}
+
+static int step_charge_soc_update(struct smb_charger *chg, int capacity)
+{
+	int rc = 0;
+
+	rc = smblib_set_charge_param(chg, &chg->param.step_soc, capacity);
+	if (rc < 0) {
+		smblib_err(chg, "Error in updating soc, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_write(chg, STEP_CHG_SOC_VBATT_V_UPDATE_REG,
+			STEP_CHG_SOC_VBATT_V_UPDATE_BIT);
+	if (rc < 0) {
+		smblib_err(chg,
+			"Couldn't set STEP_CHG_SOC_VBATT_V_UPDATE_REG rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
+{
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+				 suspend ? USBIN_SUSPEND_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+
+	return rc;
+}
+
+int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend)
+{
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, DCIN_CMD_IL_REG, DCIN_SUSPEND_BIT,
+				 suspend ? DCIN_SUSPEND_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write %s to DCIN_SUSPEND_BIT rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+
+	return rc;
+}
+
+static int smblib_set_adapter_allowance(struct smb_charger *chg,
+					u8 allowed_voltage)
+{
+	int rc = 0;
+
+	switch (allowed_voltage) {
+	case USBIN_ADAPTER_ALLOW_12V:
+	case USBIN_ADAPTER_ALLOW_5V_OR_12V:
+	case USBIN_ADAPTER_ALLOW_9V_TO_12V:
+	case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
+	case USBIN_ADAPTER_ALLOW_5V_TO_12V:
+		/* PM660 supports a maximum of 9V only */
+		if (chg->smb_version == PM660_SUBTYPE) {
+			smblib_dbg(chg, PR_MISC, "voltage not supported=%d\n",
+					allowed_voltage);
+			allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+		}
+		break;
+	}
+
+	rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
+			allowed_voltage, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+#define MICRO_5V	5000000
+#define MICRO_9V	9000000
+#define MICRO_12V	12000000
+static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
+					int min_allowed_uv, int max_allowed_uv)
+{
+	int rc;
+	u8 allowed_voltage;
+
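+	/* map the requested PD voltage range onto the closest adapter allowance */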
+	if (min_allowed_uv == MICRO_5V && max_allowed_uv == MICRO_5V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_5V);
+	} else if (min_allowed_uv == MICRO_9V && max_allowed_uv == MICRO_9V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_9V);
+	} else if (min_allowed_uv == MICRO_12V && max_allowed_uv == MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_12V;
+		smblib_set_opt_freq_buck(chg, chg->chg_freq.freq_12V);
+	} else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_9V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+	} else if (min_allowed_uv < MICRO_9V && max_allowed_uv <= MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_12V;
+	} else if (min_allowed_uv < MICRO_12V && max_allowed_uv <= MICRO_12V) {
+		allowed_voltage = USBIN_ADAPTER_ALLOW_9V_TO_12V;
+	} else {
+		smblib_err(chg, "invalid allowed voltage [%d, %d]\n",
+			min_allowed_uv, max_allowed_uv);
+		return -EINVAL;
+	}
+
+	rc = smblib_set_adapter_allowance(chg, allowed_voltage);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't configure adapter allowance rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/********************
+ * HELPER FUNCTIONS *
+ ********************/
+
+static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
+{
+	const struct apsd_result *apsd_result;
+
+	/*
+	 * PD_INACTIVE_VOTER on hvdcp_disable_votable indicates whether
+	 * apsd rerun was tried earlier
+	 */
+	if (get_client_vote(chg->hvdcp_disable_votable_indirect,
+						PD_INACTIVE_VOTER)) {
+		vote(chg->hvdcp_disable_votable_indirect,
+				PD_INACTIVE_VOTER, false, 0);
+		/* ensure hvdcp is enabled */
+		if (!get_effective_result(
+				chg->hvdcp_disable_votable_indirect)) {
+			apsd_result = smblib_get_apsd_result(chg);
+			if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) {
+				/* rerun APSD */
+				smblib_dbg(chg, PR_MISC, "rerun APSD\n");
+				smblib_masked_write(chg, CMD_APSD_REG,
+						APSD_RERUN_BIT,
+						APSD_RERUN_BIT);
+			}
+		}
+	}
+	return 0;
+}
+
+static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
+{
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+
+	/* if PD is active, APSD is disabled, so it won't have a valid result */
+	if (chg->pd_active) {
+		chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
+		return apsd_result;
+	}
+
+	chg->usb_psy_desc.type = apsd_result->pst;
+	return apsd_result;
+}
+
+static int smblib_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct smb_charger *chg = container_of(nb, struct smb_charger, nb);
+
+	if (!strcmp(psy->desc->name, "bms")) {
+		if (!chg->bms_psy)
+			chg->bms_psy = psy;
+		if (ev == PSY_EVENT_PROP_CHANGED)
+			schedule_work(&chg->bms_update_work);
+	}
+
+	if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel"))
+		chg->pl.psy = psy;
+
+	return NOTIFY_OK;
+}
+
+static int smblib_register_notifier(struct smb_charger *chg)
+{
+	int rc;
+
+	chg->nb.notifier_call = smblib_notifier_call;
+	rc = power_supply_reg_notifier(&chg->nb);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw)
+{
+	if (val_u > param->max_u || val_u < param->min_u)
+		return -EINVAL;
+
+	*val_raw = val_u << 1;
+
+	return 0;
+}
+
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+					   u8 val_raw)
+{
+	int val_u  = val_raw * param->step_u + param->min_u;
+
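+	/* encodings above max_u wrap around and represent negative deltas */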
+	if (val_u > param->max_u)
+		val_u -= param->max_u * 2;
+
+	return val_u;
+}
+
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw)
+{
+	if (val_u > param->max_u || val_u < param->min_u - param->max_u)
+		return -EINVAL;
+
+	val_u += param->max_u * 2 - param->min_u;
+	val_u %= param->max_u * 2;
+	*val_raw = val_u / param->step_u;
+
+	return 0;
+}
+
+static void smblib_uusb_removal(struct smb_charger *chg)
+{
+	int rc;
+
+	/* reset both usbin current and voltage votes */
+	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/* re-enable AUTH_IRQ_EN_CFG_BIT */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable QC auth setting rc=%d\n", rc);
+	}
+
+	/* reconfigure allowed voltage for HVDCP */
+	rc = smblib_set_adapter_allowance(chg,
+			USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+			rc);
+
+	chg->voltage_min_uv = MICRO_5V;
+	chg->voltage_max_uv = MICRO_5V;
+	chg->usb_icl_delta_ua = 0;
+	chg->pulse_cnt = 0;
+
+	/* clear USB ICL vote for USB_PSY_VOTER */
+	rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't un-vote for USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for DCP_VOTER */
+	rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
+	rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
+			rc);
+}
+
+static bool smblib_sysok_reason_usbin(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, SYSOK_REASON_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get SYSOK_REASON_STATUS rc=%d\n", rc);
+		/* assuming 'not usbin' in case of read failure */
+		return false;
+	}
+
+	return stat & SYSOK_REASON_USBIN_BIT;
+}
+
+void smblib_suspend_on_debug_battery(struct smb_charger *chg)
+{
+	int rc;
+	union power_supply_propval val;
+
+	if (!chg->suspend_input_on_debug_batt)
+		return;
+
+	rc = power_supply_get_property(chg->bms_psy,
+			POWER_SUPPLY_PROP_DEBUG_BATTERY, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get debug battery prop rc=%d\n", rc);
+		return;
+	}
+
+	vote(chg->usb_icl_votable, DEBUG_BOARD_VOTER, val.intval, 0);
+	vote(chg->dc_suspend_votable, DEBUG_BOARD_VOTER, val.intval, 0);
+	if (val.intval)
+		pr_info("Input suspended: Fake battery\n");
+}
+
+int smblib_rerun_apsd_if_required(struct smb_charger *chg)
+{
+	const struct apsd_result *apsd_result;
+	union power_supply_propval val;
+	int rc;
+
+	rc = smblib_get_prop_usb_present(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get usb present rc = %d\n", rc);
+		return rc;
+	}
+
+	if (!val.intval)
+		return 0;
+
+	apsd_result = smblib_get_apsd_result(chg);
+	if ((apsd_result->pst == POWER_SUPPLY_TYPE_UNKNOWN)
+		|| (apsd_result->pst == POWER_SUPPLY_TYPE_USB)) {
+		/* rerun APSD */
+		pr_info("Rerunning APSD type = %s at bootup\n",
+				apsd_result->name);
+		rc = smblib_masked_write(chg, CMD_APSD_REG,
+					APSD_RERUN_BIT,
+					APSD_RERUN_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't rerun APSD rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count)
+{
+	int rc;
+	u8 val[2];
+
+	switch (chg->smb_version) {
+	case PMI8998_SUBTYPE:
+		rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, val);
+		if (rc) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+		*count = val[0] & QC_PULSE_COUNT_MASK;
+		break;
+	case PM660_SUBTYPE:
+		rc = smblib_multibyte_read(chg,
+				QC_PULSE_COUNT_STATUS_1_REG, val, 2);
+		if (rc) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_1_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+		*count = (val[1] << 8) | val[0];
+		break;
+	default:
+		smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+				chg->smb_version);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*********************
+ * VOTABLE CALLBACKS *
+ *********************/
+
+static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
+			int suspend, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	/* resume input if suspend is invalid */
+	if (suspend < 0)
+		suspend = 0;
+
+	return smblib_set_dc_suspend(chg, (bool)suspend);
+}
+
+#define USBIN_25MA	25000
+#define USBIN_100MA	100000
+#define USBIN_150MA	150000
+#define USBIN_500MA	500000
+#define USBIN_900MA	900000
+
+static int set_sdp_current(struct smb_charger *chg, int icl_ua)
+{
+	int rc;
+	u8 icl_options;
+
+	/* power source is SDP */
+	switch (icl_ua) {
+	case USBIN_100MA:
+		/* USB 2.0 100mA */
+		icl_options = 0;
+		break;
+	case USBIN_150MA:
+		/* USB 3.0 150mA */
+		icl_options = CFG_USB3P0_SEL_BIT;
+		break;
+	case USBIN_500MA:
+		/* USB 2.0 500mA */
+		icl_options = USB51_MODE_BIT;
+		break;
+	case USBIN_900MA:
+		/* USB 3.0 900mA */
+		icl_options = CFG_USB3P0_SEL_BIT | USB51_MODE_BIT;
+		break;
+	default:
+		smblib_err(chg, "ICL %duA isn't supported for SDP\n", icl_ua);
+		return -EINVAL;
+	}
+
+	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+		CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set ICL options rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smblib_usb_icl_vote_callback(struct votable *votable, void *data,
+			int icl_ua, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc = 0;
+	bool override;
+	union power_supply_propval pval;
+
+	/* suspend and return if 25mA or less is requested */
+	if (client && (icl_ua < USBIN_25MA))
+		return smblib_set_usb_suspend(chg, true);
+
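+	/* hold off the ICL-change interrupt while the limit is reconfigured */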
+	disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
+	if (!client)
+		goto override_suspend_config;
+
+	rc = smblib_get_prop_typec_mode(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
+		goto enable_icl_changed_interrupt;
+	}
+
+	/* configure current */
+	if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+		&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
+		rc = set_sdp_current(chg, icl_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set SDP ICL rc=%d\n", rc);
+			goto enable_icl_changed_interrupt;
+		}
+	} else {
+		rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
+				icl_ua - chg->icl_reduction_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
+			goto enable_icl_changed_interrupt;
+		}
+	}
+
+override_suspend_config:
+	/* determine if override needs to be enforced */
+	override = true;
+	if (client == NULL) {
+		/* remove override if there are no voters - hw defaults are desired */
+		override = false;
+	} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+		if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)
+			/* For std cable with type = SDP never override */
+			override = false;
+		else if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_CDP
+			&& icl_ua - chg->icl_reduction_ua == 1500000)
+			/*
+			 * For std cable with type = CDP override only if
+			 * current is not 1500mA
+			 */
+			override = false;
+	}
+
+	/* enforce override */
+	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+		USBIN_MODE_CHG_BIT, override ? USBIN_MODE_CHG_BIT : 0);
+
+	rc = smblib_icl_override(chg, override);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set ICL override rc=%d\n", rc);
+		goto enable_icl_changed_interrupt;
+	}
+
+	/* unsuspend after configuring current and override */
+	rc = smblib_set_usb_suspend(chg, false);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't resume input rc=%d\n", rc);
+		goto enable_icl_changed_interrupt;
+	}
+
+enable_icl_changed_interrupt:
+	enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
+	return rc;
+}
+
+static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
+			int icl_ua, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc = 0;
+	bool suspend;
+
+	if (icl_ua < 0) {
+		smblib_dbg(chg, PR_MISC, "No Voter hence suspending\n");
+		icl_ua = 0;
+	}
+
+	suspend = (icl_ua < USBIN_25MA);
+	if (suspend)
+		goto suspend;
+
+	rc = smblib_set_charge_param(chg, &chg->param.dc_icl, icl_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set DC input current limit rc=%d\n",
+			rc);
+		return rc;
+	}
+
+suspend:
+	rc = vote(chg->dc_suspend_votable, USER_VOTER, suspend, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
+			suspend ? "suspend" : "resume", rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int smblib_pd_disallowed_votable_indirect_callback(
+	struct votable *votable, void *data, int disallowed, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	rc = vote(chg->pd_allowed_votable, PD_DISALLOWED_INDIRECT_VOTER,
+		!disallowed, 0);
+
+	return rc;
+}
+
+static int smblib_awake_vote_callback(struct votable *votable, void *data,
+			int awake, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	if (awake)
+		pm_stay_awake(chg->dev);
+	else
+		pm_relax(chg->dev);
+
+	return 0;
+}
+
+static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
+			int chg_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+				 CHARGING_ENABLE_CMD_BIT,
+				 chg_disable ? 0 : CHARGING_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s charging rc=%d\n",
+			chg_disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
+			void *data, int chg_enable, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0);
+
+	return 0;
+}
+
+static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
+			void *data,
+			int hvdcp_enable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+	u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
+
+	/* vote to enable/disable HW autonomous INOV */
+	vote(chg->hvdcp_hw_inov_dis_votable, client, !hvdcp_enable, 0);
+
+	/*
+	 * Disable the autonomous bit and auth bit for disabling hvdcp.
+	 * This ensures only qc 2.0 detection runs but no vbus
+	 * negotiation happens.
+	 */
+	if (!hvdcp_enable)
+		val = HVDCP_EN_BIT;
+
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+				 HVDCP_EN_BIT | HVDCP_AUTH_ALG_EN_CFG_BIT,
+				 val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+			hvdcp_enable ? "enable" : "disable", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smblib_hvdcp_disable_indirect_vote_callback(struct votable *votable,
+			void *data, int hvdcp_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	vote(chg->hvdcp_enable_votable, HVDCP_INDIRECT_VOTER,
+			!hvdcp_disable, 0);
+
+	return 0;
+}
+
+static int smblib_apsd_disable_vote_callback(struct votable *votable,
+			void *data,
+			int apsd_disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	if (apsd_disable) {
+		/* Don't run APSD on CC debounce when APSD is disabled */
+		rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+							APSD_START_ON_CC_BIT,
+							0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
+									rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+							AUTO_SRC_DETECT_BIT,
+							0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable APSD rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+							AUTO_SRC_DETECT_BIT,
+							AUTO_SRC_DETECT_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable APSD rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+							APSD_START_ON_CC_BIT,
+							APSD_START_ON_CC_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int smblib_hvdcp_hw_inov_dis_vote_callback(struct votable *votable,
+				void *data, int disable, const char *client)
+{
+	struct smb_charger *chg = data;
+	int rc;
+
+	if (disable) {
+		/*
+		 * the pulse count register gets zeroed when autonomous mode is
+		 * disabled, so capture the count before disabling
+		 */
+		rc = smblib_get_pulse_cnt(chg, &chg->pulse_cnt);
+		if (rc < 0) {
+			pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+			HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT,
+			disable ? 0 : HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+				disable ? "disable" : "enable", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*******************
+ * VCONN REGULATOR *
+ *******************/
+
+#define MAX_OTG_SS_TRIES 2
+static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	u8 otg_stat, stat4;
+	int rc = 0, i;
+
+	if (!chg->external_vconn) {
+		/*
+		 * Hardware based OTG soft start should complete within 1ms, so
+		 * wait for 2ms in the worst case.
+		 */
+		for (i = 0; i < MAX_OTG_SS_TRIES; ++i) {
+			usleep_range(1000, 1100);
+			rc = smblib_read(chg, OTG_STATUS_REG, &otg_stat);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't read OTG status rc=%d\n",
+									rc);
+				return rc;
+			}
+
+			if (otg_stat & BOOST_SOFTSTART_DONE_BIT)
+				break;
+		}
+
+		if (!(otg_stat & BOOST_SOFTSTART_DONE_BIT)) {
+			smblib_err(chg, "Couldn't enable VCONN; OTG soft start failed\n");
+			return -EAGAIN;
+		}
+	}
+
+	/*
+	 * VCONN_EN_ORIENTATION overrides which CC pin is used for VCONN; it
+	 * must be set to the reverse polarity of CC_OUT.
+	 */
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
+	stat4 = stat4 & CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT,
+				 VCONN_EN_VALUE_BIT | stat4);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable vconn setting rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (chg->vconn_en)
+		goto unlock;
+
+	rc = _smblib_vconn_regulator_enable(rdev);
+	if (rc >= 0)
+		chg->vconn_en = true;
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+static int _smblib_vconn_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	smblib_dbg(chg, PR_OTG, "disabling VCONN\n");
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable vconn regulator rc=%d\n", rc);
+
+	return rc;
+}
+
+int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (!chg->vconn_en)
+		goto unlock;
+
+	rc = _smblib_vconn_regulator_disable(rdev);
+	if (rc >= 0)
+		chg->vconn_en = false;
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int ret;
+
+	mutex_lock(&chg->otg_oc_lock);
+	ret = chg->vconn_en;
+	mutex_unlock(&chg->otg_oc_lock);
+	return ret;
+}
+
+/*****************
+ * OTG REGULATOR *
+ *****************/
+
+static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc;
+
+	smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
+	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_OTG, "enabling OTG\n");
+	rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable OTG regulator rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (chg->otg_en)
+		goto unlock;
+
+	rc = _smblib_vbus_regulator_enable(rdev);
+	if (rc >= 0)
+		chg->otg_en = true;
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+static int _smblib_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (!chg->external_vconn && chg->vconn_en) {
+		smblib_dbg(chg, PR_OTG, "Killing VCONN before disabling OTG\n");
+		rc = _smblib_vconn_regulator_disable(rdev);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
+	}
+
+	smblib_dbg(chg, PR_OTG, "disabling OTG\n");
+	rc = smblib_write(chg, CMD_OTG_REG, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable OTG regulator rc=%d\n", rc);
+		return rc;
+	}
+
+	smblib_dbg(chg, PR_OTG, "start 1 in 8 mode\n");
+	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+				 ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int rc = 0;
+
+	mutex_lock(&chg->otg_oc_lock);
+	if (!chg->otg_en)
+		goto unlock;
+
+	rc = _smblib_vbus_regulator_disable(rdev);
+	if (rc >= 0)
+		chg->otg_en = false;
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+	return rc;
+}
+
+int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct smb_charger *chg = rdev_get_drvdata(rdev);
+	int ret;
+
+	mutex_lock(&chg->otg_oc_lock);
+	ret = chg->otg_en;
+	mutex_unlock(&chg->otg_oc_lock);
+	return ret;
+}
+
+/********************
+ * BATT PSY GETTERS *
+ ********************/
+
+int smblib_get_prop_input_suspend(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	val->intval
+		= (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0)
+		 && get_client_vote(chg->dc_suspend_votable, USER_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_batt_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATIF_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATIF_INT_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = !(stat & (BAT_THERM_OR_ID_MISSING_RT_STS_BIT
+					| BAT_TERMINAL_MISSING_RT_STS_BIT));
+
+	return rc;
+}
+
+int smblib_get_prop_batt_capacity(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	int rc = -EINVAL;
+
+	if (chg->fake_capacity >= 0) {
+		val->intval = chg->fake_capacity;
+		return 0;
+	}
+
+	if (chg->bms_psy)
+		rc = power_supply_get_property(chg->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_status(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	union power_supply_propval pval = {0, };
+	bool usb_online, dc_online;
+	u8 stat;
+	int rc;
+
+	rc = smblib_get_prop_usb_online(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get usb online property rc=%d\n",
+			rc);
+		return rc;
+	}
+	usb_online = (bool)pval.intval;
+
+	rc = smblib_get_prop_dc_online(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get dc online property rc=%d\n",
+			rc);
+		return rc;
+	}
+	dc_online = (bool)pval.intval;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+
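+	/* with no input present, report FULL or DISCHARGING from the charger state */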
+	if (!usb_online && !dc_online) {
+		switch (stat) {
+		case TERMINATE_CHARGE:
+		case INHIBIT_CHARGE:
+			val->intval = POWER_SUPPLY_STATUS_FULL;
+			break;
+		default:
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+			break;
+		}
+		return rc;
+	}
+
+	switch (stat) {
+	case TRICKLE_CHARGE:
+	case PRE_CHARGE:
+	case FAST_CHARGE:
+	case FULLON_CHARGE:
+	case TAPER_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_CHARGING;
+		break;
+	case TERMINATE_CHARGE:
+	case INHIBIT_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_FULL;
+		break;
+	case DISABLE_CHARGE:
+		val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	switch (stat & BATTERY_CHARGER_STATUS_MASK) {
+	case TRICKLE_CHARGE:
+	case PRE_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+		break;
+	case FAST_CHARGE:
+	case FULLON_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+		break;
+	case TAPER_CHARGE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_TAPER;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+	}
+
+	return rc;
+}
+
+int smblib_get_prop_batt_health(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	union power_supply_propval pval;
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "BATTERY_CHARGER_STATUS_2 = 0x%02x\n",
+		   stat);
+
+	if (stat & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
+		rc = smblib_get_prop_batt_voltage_now(chg, &pval);
+		if (!rc) {
+			/*
+			 * If Vbatt is within 40mV above Vfloat, then don't
+			 * treat it as overvoltage.
+			 */
+			if (pval.intval >=
+				get_effective_result(chg->fv_votable) + 40000) {
+				val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+				smblib_err(chg, "battery over-voltage\n");
+				goto done;
+			}
+		}
+	}
+
+	if (stat & BAT_TEMP_STATUS_TOO_COLD_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_COLD;
+	else if (stat & BAT_TEMP_STATUS_TOO_HOT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (stat & BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_COOL;
+	else if (stat & BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT)
+		val->intval = POWER_SUPPLY_HEALTH_WARM;
+	else
+		val->intval = POWER_SUPPLY_HEALTH_GOOD;
+
+done:
+	return rc;
+}
+
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->system_temp_level;
+	return 0;
+}
+
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	u8 stat;
+	int rc;
+
+	rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n", rc);
+		return rc;
+	}
+	val->intval = (stat & SOFT_ILIMIT_BIT) || chg->is_hdc;
+	return 0;
+}
+
+int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_VOLTAGE_NOW, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_current_now(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_CURRENT_NOW, val);
+	return rc;
+}
+
+int smblib_get_prop_batt_temp(struct smb_charger *chg,
+			      union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->bms_psy)
+		return -EINVAL;
+
+	rc = power_supply_get_property(chg->bms_psy,
+				       POWER_SUPPLY_PROP_TEMP, val);
+	return rc;
+}
+
+int smblib_get_prop_step_chg_step(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	if (!chg->step_chg_enabled) {
+		val->intval = -1;
+		return 0;
+	}
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	val->intval = (stat & STEP_CHARGING_STATUS_MASK) >>
+				STEP_CHARGING_STATUS_SHIFT;
+
+	return rc;
+}
+
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+					union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+	val->intval = (stat == TERMINATE_CHARGE);
+	return 0;
+}
+
+/***********************
+ * BATTERY PSY SETTERS *
+ ***********************/
+
+int smblib_set_prop_input_suspend(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	int rc;
+
+	/* vote 0mA when suspended */
+	rc = vote(chg->usb_icl_votable, USER_VOTER, (bool)val->intval, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s USB rc=%d\n",
+			(bool)val->intval ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	rc = vote(chg->dc_suspend_votable, USER_VOTER, (bool)val->intval, 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
+			(bool)val->intval ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	power_supply_changed(chg->batt_psy);
+	return rc;
+}
+
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	chg->fake_capacity = val->intval;
+
+	power_supply_changed(chg->batt_psy);
+
+	return 0;
+}
+
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	if (val->intval < 0)
+		return -EINVAL;
+
+	if (chg->thermal_levels <= 0)
+		return -EINVAL;
+
+	if (val->intval > chg->thermal_levels)
+		return -EINVAL;
+
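+	/* the highest level disables charging; level 0 drops the FCC vote entirely */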
+	chg->system_temp_level = val->intval;
+	if (chg->system_temp_level == chg->thermal_levels)
+		return vote(chg->chg_disable_votable,
+			THERMAL_DAEMON_VOTER, true, 0);
+
+	vote(chg->chg_disable_votable, THERMAL_DAEMON_VOTER, false, 0);
+	if (chg->system_temp_level == 0)
+		return vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, false, 0);
+
+	vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, true,
+			chg->thermal_mitigation[chg->system_temp_level]);
+	return 0;
+}
+
+int smblib_rerun_aicl(struct smb_charger *chg)
+{
+	int rc, settled_icl_ua;
+	u8 stat;
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+								rc);
+		return rc;
+	}
+
+	/* USB is suspended so skip re-running AICL */
+	if (stat & USBIN_SUSPEND_STS_BIT)
+		return rc;
+
+	smblib_dbg(chg, PR_MISC, "re-running AICL\n");
+	switch (chg->smb_version) {
+	case PMI8998_SUBTYPE:
+		rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+							&settled_icl_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+			return rc;
+		}
+
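+		/* nudge the ICL down one step, then release it to force AICL to rerun */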
+		vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+				max(settled_icl_ua - chg->param.usb_icl.step_u,
+				chg->param.usb_icl.step_u));
+		vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+		break;
+	case PM660_SUBTYPE:
+		/*
+		 * Use restart_AICL instead of trigger_AICL as it runs the
+		 * complete AICL instead of starting from the last settled
+		 * value.
+		 */
+		rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+					RESTART_AICL_BIT, RESTART_AICL_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+									rc);
+		break;
+	default:
+		smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+				chg->smb_version);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smblib_dp_pulse(struct smb_charger *chg)
+{
+	int rc;
+
+	/* QC 3.0 increment */
+	rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_INCREMENT_BIT,
+			SINGLE_INCREMENT_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+				rc);
+
+	return rc;
+}
+
+static int smblib_dm_pulse(struct smb_charger *chg)
+{
+	int rc;
+
+	/* QC 3.0 decrement */
+	rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_DECREMENT_BIT,
+			SINGLE_DECREMENT_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+				rc);
+
+	return rc;
+}
+
+int smblib_dp_dm(struct smb_charger *chg, int val)
+{
+	int target_icl_ua, rc = 0;
+
+	switch (val) {
+	case POWER_SUPPLY_DP_DM_DP_PULSE:
+		rc = smblib_dp_pulse(chg);
+		if (!rc)
+			chg->pulse_cnt++;
+		smblib_dbg(chg, PR_PARALLEL, "DP_DM_DP_PULSE rc=%d cnt=%d\n",
+				rc, chg->pulse_cnt);
+		break;
+	case POWER_SUPPLY_DP_DM_DM_PULSE:
+		rc = smblib_dm_pulse(chg);
+		if (!rc && chg->pulse_cnt)
+			chg->pulse_cnt--;
+		smblib_dbg(chg, PR_PARALLEL, "DP_DM_DM_PULSE rc=%d cnt=%d\n",
+				rc, chg->pulse_cnt);
+		break;
+	case POWER_SUPPLY_DP_DM_ICL_DOWN:
+		chg->usb_icl_delta_ua -= 100000;
+		target_icl_ua = get_effective_result(chg->usb_icl_votable);
+		vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
+				target_icl_ua + chg->usb_icl_delta_ua);
+		break;
+	case POWER_SUPPLY_DP_DM_ICL_UP:
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/*******************
+ * DC PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, DCIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read DCIN_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & DCIN_PLUGIN_RT_STS_BIT);
+	return 0;
+}
+
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	if (get_client_vote(chg->dc_suspend_votable, USER_VOTER)) {
+		val->intval = false;
+		return rc;
+	}
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+		   stat);
+
+	val->intval = (stat & USE_DCIN_BIT) &&
+		      (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
+
+	return rc;
+}
+
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	val->intval = get_effective_result_locked(chg->dc_icl_votable);
+	return 0;
+}
+
+/*******************
+ * DC PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc;
+
+	rc = vote(chg->dc_icl_votable, USER_VOTER, true, val->intval);
+	return rc;
+}
+
+/*******************
+ * USB PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_usb_present(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read USBIN_RT_STS rc=%d\n", rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+	return 0;
+}
+
+int smblib_get_prop_usb_online(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	if (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0) {
+		val->intval = false;
+		return rc;
+	}
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+		   stat);
+
+	val->intval = (stat & USE_USBIN_BIT) &&
+		      (stat & VALID_INPUT_POWER_SOURCE_STS_BIT);
+	return rc;
+}
+
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_get_prop_usb_present(chg, val);
+	if (rc < 0 || !val->intval)
+		return rc;
+
+	if (!chg->iio.usbin_v_chan ||
+		PTR_ERR(chg->iio.usbin_v_chan) == -EPROBE_DEFER)
+		chg->iio.usbin_v_chan = iio_channel_get(chg->dev, "usbin_v");
+
+	if (IS_ERR(chg->iio.usbin_v_chan))
+		return PTR_ERR(chg->iio.usbin_v_chan);
+
+	return iio_read_channel_processed(chg->iio.usbin_v_chan, &val->intval);
+}
+
+int smblib_get_prop_pd_current_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	val->intval = get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_usb_current_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	val->intval = get_client_vote_locked(chg->usb_icl_votable,
+			USB_PSY_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_get_prop_usb_present(chg, val);
+	if (rc < 0 || !val->intval)
+		return rc;
+
+	if (!chg->iio.usbin_i_chan ||
+		PTR_ERR(chg->iio.usbin_i_chan) == -EPROBE_DEFER)
+		chg->iio.usbin_i_chan = iio_channel_get(chg->dev, "usbin_i");
+
+	if (IS_ERR(chg->iio.usbin_i_chan))
+		return PTR_ERR(chg->iio.usbin_i_chan);
+
+	return iio_read_channel_processed(chg->iio.usbin_i_chan, &val->intval);
+}
+
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+				 union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->iio.temp_chan ||
+		PTR_ERR(chg->iio.temp_chan) == -EPROBE_DEFER)
+		chg->iio.temp_chan = iio_channel_get(chg->dev, "charger_temp");
+
+	if (IS_ERR(chg->iio.temp_chan))
+		return PTR_ERR(chg->iio.temp_chan);
+
+	rc = iio_read_channel_processed(chg->iio.temp_chan, &val->intval);
+	val->intval /= 100;
+	return rc;
+}
+
+int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
+				    union power_supply_propval *val)
+{
+	int rc;
+
+	if (!chg->iio.temp_max_chan ||
+		PTR_ERR(chg->iio.temp_max_chan) == -EPROBE_DEFER)
+		chg->iio.temp_max_chan = iio_channel_get(chg->dev,
+							 "charger_temp_max");
+	if (IS_ERR(chg->iio.temp_max_chan))
+		return PTR_ERR(chg->iio.temp_max_chan);
+
+	rc = iio_read_channel_processed(chg->iio.temp_max_chan, &val->intval);
+	val->intval /= 100;
+	return rc;
+}
+
+int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
+					 union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n",
+		   stat);
+
+	if (stat & CC_ATTACHED_BIT)
+		val->intval = (bool)(stat & CC_ORIENTATION_BIT) + 1;
+	else
+		val->intval = 0;
+
+	return rc;
+}
+
+static const char * const smblib_typec_mode_name[] = {
+	[POWER_SUPPLY_TYPEC_NONE]		  = "NONE",
+	[POWER_SUPPLY_TYPEC_SOURCE_DEFAULT]	  = "SOURCE_DEFAULT",
+	[POWER_SUPPLY_TYPEC_SOURCE_MEDIUM]	  = "SOURCE_MEDIUM",
+	[POWER_SUPPLY_TYPEC_SOURCE_HIGH]	  = "SOURCE_HIGH",
+	[POWER_SUPPLY_TYPEC_NON_COMPLIANT]	  = "NON_COMPLIANT",
+	[POWER_SUPPLY_TYPEC_SINK]		  = "SINK",
+	[POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE]   = "SINK_POWERED_CABLE",
+	[POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY] = "SINK_DEBUG_ACCESSORY",
+	[POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER]   = "SINK_AUDIO_ADAPTER",
+	[POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY]   = "POWERED_CABLE_ONLY",
+};
+
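+/*
+ * In UFP (sink) mode TYPE_C_STATUS_1 reports the Rp advertisement of the
+ * attached source: Rd-std maps to a default-current source, Rd-1.5 to a
+ * medium (1.5 A) source and Rd-3.0 to a high (3 A) source; any other
+ * non-zero value is treated as non-compliant.
+ */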
+static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_1 rc=%d\n", rc);
+		return POWER_SUPPLY_TYPEC_NONE;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_1 = 0x%02x\n", stat);
+
+	switch (stat) {
+	case 0:
+		return POWER_SUPPLY_TYPEC_NONE;
+	case UFP_TYPEC_RDSTD_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
+	case UFP_TYPEC_RD1P5_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_MEDIUM;
+	case UFP_TYPEC_RD3P0_BIT:
+		return POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+	default:
+		break;
+	}
+
+	return POWER_SUPPLY_TYPEC_NON_COMPLIANT;
+}
+
+static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_2_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_2 rc=%d\n", rc);
+		return POWER_SUPPLY_TYPEC_NONE;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_2 = 0x%02x\n", stat);
+
+	switch (stat & DFP_TYPEC_MASK) {
+	case DFP_RA_RA_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
+	case DFP_RD_RD_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY;
+	case DFP_RD_RA_VCONN_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE;
+	case DFP_RD_OPEN_BIT:
+		return POWER_SUPPLY_TYPEC_SINK;
+	case DFP_RA_OPEN_BIT:
+		return POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY;
+	default:
+		break;
+	}
+
+	return POWER_SUPPLY_TYPEC_NONE;
+}
+
+int smblib_get_prop_typec_mode(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		val->intval = POWER_SUPPLY_TYPEC_NONE;
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat);
+
+	if (!(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
+		val->intval = POWER_SUPPLY_TYPEC_NONE;
+		return rc;
+	}
+
+	if (stat & UFP_DFP_MODE_STATUS_BIT)
+		val->intval = smblib_get_prop_dfp_mode(chg);
+	else
+		val->intval = smblib_get_prop_ufp_mode(chg);
+
+	return rc;
+}
+
+int smblib_get_prop_typec_power_role(struct smb_charger *chg,
+				     union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 ctrl;
+
+	rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+			rc);
+		return rc;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_INTRPT_ENB_SOFTWARE_CTRL = 0x%02x\n",
+		   ctrl);
+
+	if (ctrl & TYPEC_DISABLE_CMD_BIT) {
+		val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		return rc;
+	}
+
+	switch (ctrl & (DFP_EN_CMD_BIT | UFP_EN_CMD_BIT)) {
+	case 0:
+		val->intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+		break;
+	case DFP_EN_CMD_BIT:
+		val->intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+		break;
+	case UFP_EN_CMD_BIT:
+		val->intval = POWER_SUPPLY_TYPEC_PR_SINK;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		smblib_err(chg, "unsupported power role 0x%02lx\n",
+			ctrl & (DFP_EN_CMD_BIT | UFP_EN_CMD_BIT));
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int smblib_get_prop_pd_allowed(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	val->intval = get_effective_result(chg->pd_allowed_votable);
+	return 0;
+}
+
+int smblib_get_prop_input_current_settled(struct smb_charger *chg,
+					  union power_supply_propval *val)
+{
+	return smblib_get_charge_param(chg, &chg->param.icl_stat, &val->intval);
+}
+
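+/*
+ * QC 3.0 adapters step their output in 200 mV increments above the 5 V
+ * base, so the settled input voltage is estimated as
+ * 5 V + 0.2 V * pulse_count (e.g. 20 pulses -> 9 V).
+ */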
+#define HVDCP3_STEP_UV	200000
+int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
+						union power_supply_propval *val)
+{
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
+	int rc, pulses;
+	u8 stat;
+
+	val->intval = MICRO_5V;
+	if (apsd_result == NULL) {
+		smblib_err(chg, "APSD result is NULL\n");
+		return 0;
+	}
+
+	switch (apsd_result->pst) {
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+			return 0;
+		}
+		pulses = (stat & QC_PULSE_COUNT_MASK);
+		val->intval = MICRO_5V + HVDCP3_STEP_UV * pulses;
+		break;
+	default:
+		val->intval = MICRO_5V;
+		break;
+	}
+
+	return 0;
+}
+
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc;
+	u8 ctrl;
+
+	rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG rc=%d\n",
+			rc);
+		return rc;
+	}
+	val->intval = ctrl & EXIT_SNK_BASED_ON_CC_BIT;
+	return 0;
+}
+
+int smblib_get_pe_start(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	/*
+	 * hvdcp timeout voter is the last one to allow pd. Use its vote
+	 * to indicate start of pe engine
+	 */
+	val->intval
+		= !get_client_vote_locked(chg->pd_disallowed_votable_indirect,
+			HVDCP_TIMEOUT_VOTER);
+	return 0;
+}
+
+int smblib_get_prop_die_health(struct smb_charger *chg,
+						union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TEMP_RANGE_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TEMP_RANGE_STATUS_REG rc=%d\n",
+									rc);
+		return rc;
+	}
+
+	/* TEMP_RANGE bits are mutually exclusive */
+	switch (stat & TEMP_RANGE_MASK) {
+	case TEMP_BELOW_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_COOL;
+		break;
+	case TEMP_WITHIN_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_WARM;
+		break;
+	case TEMP_ABOVE_RANGE_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_HOT;
+		break;
+	case ALERT_LEVEL_BIT:
+		val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+		break;
+	default:
+		val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	return 0;
+}
+
+/*******************
+ * USB PSY SETTERS *
+ *******************/
+
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc;
+
+	if (chg->pd_active)
+		rc = vote(chg->usb_icl_votable, PD_VOTER, true, val->intval);
+	else
+		rc = -EPERM;
+
+	return rc;
+}
+
+int smblib_set_prop_usb_current_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	if (!chg->pd_active) {
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+				true, val->intval);
+	} else if (chg->system_suspend_supported) {
+		if (val->intval <= USBIN_25MA)
+			rc = vote(chg->usb_icl_votable,
+				PD_SUSPEND_SUPPORTED_VOTER, true, val->intval);
+		else
+			rc = vote(chg->usb_icl_votable,
+				PD_SUSPEND_SUPPORTED_VOTER, false, 0);
+	}
+	return rc;
+}
+
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_set_charge_param(chg, &chg->param.freq_boost,
+				val->intval <= chg->boost_threshold_ua ?
+				chg->chg_freq.freq_below_otg_threshold :
+				chg->chg_freq.freq_above_otg_threshold);
+	if (rc < 0) {
+		dev_err(chg->dev, "Error in setting freq_boost rc=%d\n", rc);
+		return rc;
+	}
+
+	chg->boost_current_ua = val->intval;
+	return rc;
+}
+
+int smblib_set_prop_typec_power_role(struct smb_charger *chg,
+				     const union power_supply_propval *val)
+{
+	int rc = 0;
+	u8 power_role;
+
+	switch (val->intval) {
+	case POWER_SUPPLY_TYPEC_PR_NONE:
+		power_role = TYPEC_DISABLE_CMD_BIT;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_DUAL:
+		power_role = 0;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_SINK:
+		power_role = UFP_EN_CMD_BIT;
+		break;
+	case POWER_SUPPLY_TYPEC_PR_SOURCE:
+		power_role = DFP_EN_CMD_BIT;
+		break;
+	default:
+		smblib_err(chg, "power role %d not supported\n", val->intval);
+		return -EINVAL;
+	}
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, power_role);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+			power_role, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc, min_uv;
+
+	min_uv = min(val->intval, chg->voltage_max_uv);
+	rc = smblib_set_usb_pd_allowed_voltage(chg, min_uv,
+					       chg->voltage_max_uv);
+	if (rc < 0) {
+		smblib_err(chg, "invalid max voltage %duV rc=%d\n",
+			val->intval, rc);
+		return rc;
+	}
+
+	if (chg->mode == PARALLEL_MASTER)
+		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER,
+		     min_uv > MICRO_5V, 0);
+
+	chg->voltage_min_uv = min_uv;
+	return rc;
+}
+
+int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
+				    const union power_supply_propval *val)
+{
+	int rc, max_uv;
+
+	max_uv = max(val->intval, chg->voltage_min_uv);
+	rc = smblib_set_usb_pd_allowed_voltage(chg, chg->voltage_min_uv,
+					       max_uv);
+	if (rc < 0) {
+		smblib_err(chg, "invalid min voltage %duV rc=%d\n",
+			val->intval, rc);
+		return rc;
+	}
+
+	chg->voltage_max_uv = max_uv;
+	rc = smblib_rerun_aicl(chg);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
+
+	return rc;
+}
+
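+/*
+ * Called when the PD stack (dis)engages. On activation this routes VCONN to
+ * the CC line indicated by the detected orientation, enforces a 500 mA ICL
+ * placeholder until the real PD current vote arrives, drops the legacy
+ * DCP/USB/parallel ICL votes and hands CC-pin selection to software; on
+ * deactivation CC control returns to hardware and APSD may be re-run if CC
+ * is still debounced.
+ */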
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+			      const union power_supply_propval *val)
+{
+	int rc;
+	u8 stat = 0;
+	bool cc_debounced;
+	bool orientation;
+	bool pd_active = val->intval;
+
+	if (!get_effective_result(chg->pd_allowed_votable)) {
+		smblib_err(chg, "PD is not allowed\n");
+		return -EINVAL;
+	}
+
+	vote(chg->apsd_disable_votable, PD_VOTER, pd_active, 0);
+	vote(chg->pd_allowed_votable, PD_VOTER, pd_active, 0);
+
+	/*
+	 * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line
+	 * when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override) is set
+	 * or when VCONN_EN_VALUE_BIT is set.
+	 */
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return rc;
+	}
+
+	if (pd_active) {
+		orientation = stat & CC_ORIENTATION_BIT;
+		rc = smblib_masked_write(chg,
+				TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				VCONN_EN_ORIENTATION_BIT,
+				orientation ? 0 : VCONN_EN_ORIENTATION_BIT);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't enable vconn on CC line rc=%d\n", rc);
+			return rc;
+		}
+		/*
+		 * Enforce 500mA for PD until the real vote comes in later.
+		 * It is guaranteed that pd_active is set prior to
+		 * pd_current_max
+		 */
+		rc = vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		/* clear USB ICL vote for DCP_VOTER */
+		rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't un-vote DCP from USB ICL rc=%d\n",
+				rc);
+
+		/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
+		rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+		if (rc < 0)
+			smblib_err(chg,
+					"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
+					rc);
+
+		/* remove USB_PSY_VOTER */
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
+			return rc;
+		}
+
+		/* pd active set, parallel charger can be enabled now */
+		rc = vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
+				false, 0);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't unvote PL_DELAY_HVDCP_VOTER rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	/* CC pin selection s/w override in PD session; h/w otherwise. */
+	rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+				 TYPEC_SPARE_CFG_BIT,
+				 pd_active ? TYPEC_SPARE_CFG_BIT : 0);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't change cc_out ctrl to %s rc=%d\n",
+			pd_active ? "SW" : "HW", rc);
+		return rc;
+	}
+
+	cc_debounced = (bool)(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+	if (!pd_active && cc_debounced)
+		try_rerun_apsd_for_hvdcp(chg);
+
+	chg->pd_active = pd_active;
+	smblib_update_usb_type(chg);
+	power_supply_changed(chg->usb_psy);
+
+	return rc;
+}
+
+int smblib_set_prop_ship_mode(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc;
+
+	smblib_dbg(chg, PR_MISC, "Set ship mode: %d!!\n", !!val->intval);
+
+	rc = smblib_masked_write(chg, SHIP_MODE_REG, SHIP_MODE_EN_BIT,
+			!!val->intval ? SHIP_MODE_EN_BIT : 0);
+	if (rc < 0)
+		dev_err(chg->dev, "Couldn't %s ship mode, rc=%d\n",
+				!!val->intval ? "enable" : "disable", rc);
+
+	return rc;
+}
+
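+/*
+ * smblib_reg_block_update()/restore() walk a zero-terminated reg_info table:
+ * update saves the current masked bits into entry->bak before writing the
+ * workaround value, and restore writes the saved bits back.
+ */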
+int smblib_reg_block_update(struct smb_charger *chg,
+				struct reg_info *entry)
+{
+	int rc = 0;
+
+	while (entry && entry->reg) {
+		rc = smblib_read(chg, entry->reg, &entry->bak);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in reading %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
+		entry->bak &= entry->mask;
+
+		rc = smblib_masked_write(chg, entry->reg,
+					 entry->mask, entry->val);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in writing %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
+		entry++;
+	}
+
+	return rc;
+}
+
+int smblib_reg_block_restore(struct smb_charger *chg,
+				struct reg_info *entry)
+{
+	int rc = 0;
+
+	while (entry && entry->reg) {
+		rc = smblib_masked_write(chg, entry->reg,
+					 entry->mask, entry->bak);
+		if (rc < 0) {
+			dev_err(chg->dev, "Error in writing %s rc=%d\n",
+				entry->desc, rc);
+			break;
+		}
+		entry++;
+	}
+
+	return rc;
+}
+
+static struct reg_info cc2_detach_settings[] = {
+	{
+		.reg	= TYPE_C_CFG_REG,
+		.mask	= APSD_START_ON_CC_BIT,
+		.val	= 0,
+		.desc	= "TYPE_C_CFG_REG",
+	},
+	{
+		.reg	= TYPE_C_CFG_2_REG,
+		.mask	= TYPE_C_UFP_MODE_BIT | EN_TRY_SOURCE_MODE_BIT,
+		.val	= TYPE_C_UFP_MODE_BIT,
+		.desc	= "TYPE_C_CFG_2_REG",
+	},
+	{
+		.reg	= TYPE_C_CFG_3_REG,
+		.mask	= EN_TRYSINK_MODE_BIT,
+		.val	= 0,
+		.desc	= "TYPE_C_CFG_3_REG",
+	},
+	{
+		.reg	= TAPER_TIMER_SEL_CFG_REG,
+		.mask	= TYPEC_SPARE_CFG_BIT,
+		.val	= TYPEC_SPARE_CFG_BIT,
+		.desc	= "TAPER_TIMER_SEL_CFG_REG",
+	},
+	{
+		.reg	= TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+		.mask	= VCONN_EN_ORIENTATION_BIT,
+		.val	= 0,
+		.desc	= "TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG",
+	},
+	{
+		.reg	= MISC_CFG_REG,
+		.mask	= TCC_DEBOUNCE_20MS_BIT,
+		.val	= TCC_DEBOUNCE_20MS_BIT,
+		.desc	= "Tccdebounce time"
+	},
+	{
+	},
+};
+
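+/*
+ * Entry point of the CC2 sink-removal workaround: it only acts when
+ * TYPEC_CC2_REMOVAL_WA_BIT is set, no workaround is already in progress and
+ * the reported orientation is not 1. An Rd-std source gets the register
+ * overrides plus the detach work; medium/high Rd only records a flag so the
+ * debounce handler can finish the detection.
+ */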
+static int smblib_cc2_sink_removal_enter(struct smb_charger *chg)
+{
+	int rc = 0;
+	union power_supply_propval cc2_val = {0, };
+
+	if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
+		return rc;
+
+	if (chg->cc2_sink_detach_flag != CC2_SINK_NONE)
+		return rc;
+
+	rc = smblib_get_prop_typec_cc_orientation(chg, &cc2_val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get cc orientation rc=%d\n", rc);
+		return rc;
+	}
+	if (cc2_val.intval == 1)
+		return rc;
+
+	rc = smblib_get_prop_typec_mode(chg, &cc2_val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
+		return rc;
+	}
+
+	switch (cc2_val.intval) {
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+		smblib_reg_block_update(chg, cc2_detach_settings);
+		chg->cc2_sink_detach_flag = CC2_SINK_STD;
+		schedule_work(&chg->rdstd_cc2_detach_work);
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		chg->cc2_sink_detach_flag = CC2_SINK_MEDIUM_HIGH;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static int smblib_cc2_sink_removal_exit(struct smb_charger *chg)
+{
+	int rc = 0;
+
+	if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0)
+		return rc;
+
+	if (chg->cc2_sink_detach_flag == CC2_SINK_STD) {
+		cancel_work_sync(&chg->rdstd_cc2_detach_work);
+		smblib_reg_block_restore(chg, cc2_detach_settings);
+	}
+
+	chg->cc2_sink_detach_flag = CC2_SINK_NONE;
+
+	return rc;
+}
+
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc;
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 EXIT_SNK_BASED_ON_CC_BIT,
+				 (val->intval) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
+	if (rc < 0) {
+		smblib_err(chg, "Could not set EXIT_SNK_BASED_ON_CC rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, val->intval, 0);
+
+	if (val->intval)
+		rc = smblib_cc2_sink_removal_enter(chg);
+	else
+		rc = smblib_cc2_sink_removal_exit(chg);
+
+	if (rc < 0) {
+		smblib_err(chg, "Could not detect cc2 removal rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/************************
+ * USB MAIN PSY GETTERS *
+ ************************/
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+			       union power_supply_propval *val)
+{
+	int rc, jeita_cc_delta_ua, step_cc_delta_ua, hw_cc_delta_ua = 0;
+
+	rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
+		step_cc_delta_ua = 0;
+	} else {
+		hw_cc_delta_ua = step_cc_delta_ua;
+	}
+
+	rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
+		jeita_cc_delta_ua = 0;
+	} else if (jeita_cc_delta_ua < 0) {
+		/* HW will take the min between JEITA and step charge */
+		hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
+	}
+
+	val->intval = hw_cc_delta_ua;
+	return 0;
+}
+
+/************************
+ * USB MAIN PSY SETTERS *
+ ************************/
+
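+/*
+ * Note: despite the _MA suffix these limits are expressed in microamps,
+ * matching the uA units used for ICL votes and *total_current_ua below.
+ */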
+#define SDP_CURRENT_MA			500000
+#define CDP_CURRENT_MA			1500000
+#define DCP_CURRENT_MA			1500000
+#define HVDCP_CURRENT_MA		3000000
+#define TYPEC_DEFAULT_CURRENT_MA	900000
+#define TYPEC_MEDIUM_CURRENT_MA		1500000
+#define TYPEC_HIGH_CURRENT_MA		3000000
+static int smblib_get_charge_current(struct smb_charger *chg,
+				int *total_current_ua)
+{
+	const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
+	union power_supply_propval val = {0, };
+	int rc, typec_source_rd, current_ua;
+	bool non_compliant;
+	u8 stat5;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
+		return rc;
+	}
+	non_compliant = stat5 & TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT;
+
+	/* get settled ICL */
+	rc = smblib_get_prop_input_current_settled(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+		return rc;
+	}
+
+	typec_source_rd = smblib_get_prop_ufp_mode(chg);
+
+	/* QC 2.0/3.0 adapter */
+	if (apsd_result->bit & (QC_3P0_BIT | QC_2P0_BIT)) {
+		*total_current_ua = HVDCP_CURRENT_MA;
+		return 0;
+	}
+
+	if (non_compliant) {
+		switch (apsd_result->bit) {
+		case CDP_CHARGER_BIT:
+			current_ua = CDP_CURRENT_MA;
+			break;
+		case DCP_CHARGER_BIT:
+		case OCP_CHARGER_BIT:
+		case FLOAT_CHARGER_BIT:
+			current_ua = DCP_CURRENT_MA;
+			break;
+		default:
+			current_ua = 0;
+			break;
+		}
+
+		*total_current_ua = max(current_ua, val.intval);
+		return 0;
+	}
+
+	switch (typec_source_rd) {
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+		switch (apsd_result->bit) {
+		case CDP_CHARGER_BIT:
+			current_ua = CDP_CURRENT_MA;
+			break;
+		case DCP_CHARGER_BIT:
+		case OCP_CHARGER_BIT:
+		case FLOAT_CHARGER_BIT:
+			current_ua = chg->default_icl_ua;
+			break;
+		default:
+			current_ua = 0;
+			break;
+		}
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+		current_ua = TYPEC_MEDIUM_CURRENT_MA;
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		current_ua = TYPEC_HIGH_CURRENT_MA;
+		break;
+	case POWER_SUPPLY_TYPEC_NON_COMPLIANT:
+	case POWER_SUPPLY_TYPEC_NONE:
+	default:
+		current_ua = 0;
+		break;
+	}
+
+	*total_current_ua = max(current_ua, val.intval);
+	return 0;
+}
+
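+/*
+ * Applies an ICL reduction for parallel charging. If no client currently
+ * votes on USB_ICL the effective result is -EINVAL (hardware-chosen limit),
+ * so a software vote equal to the detected charge current is placed first;
+ * chg->icl_reduction_ua is then stored and the election re-run so the ICL
+ * vote callback can account for it.
+ */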
+int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
+{
+	int current_ua, rc;
+
+	if (reduction_ua == 0) {
+		vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	} else {
+		/*
+		 * No usb_icl voter means we are defaulting to hw chosen
+		 * max limit. We need a vote from s/w to enforce the reduction.
+		 */
+		if (get_effective_result(chg->usb_icl_votable) == -EINVAL) {
+			rc = smblib_get_charge_current(chg, &current_ua);
+			if (rc < 0) {
+				pr_err("Failed to get ICL rc=%d\n", rc);
+				return rc;
+			}
+			vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, true,
+					current_ua);
+		}
+	}
+
+	chg->icl_reduction_ua = reduction_ua;
+
+	return rerun_election(chg->usb_icl_votable);
+}
+
+/************************
+ * PARALLEL PSY GETTERS *
+ ************************/
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+				      union power_supply_propval *pval)
+{
+	if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+		chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
+
+	if (IS_ERR(chg->iio.batt_i_chan))
+		return PTR_ERR(chg->iio.batt_i_chan);
+
+	return iio_read_channel_processed(chg->iio.batt_i_chan, &pval->intval);
+}
+
+/**********************
+ * INTERRUPT HANDLERS *
+ **********************/
+
+irqreturn_t smblib_handle_debug(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, OTG_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read OTG_INT_RT_STS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if (stat & OTG_OVERCURRENT_RT_STS_BIT)
+		schedule_work(&chg->otg_oc_work);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	u8 stat;
+	int rc;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+			rc);
+		return IRQ_HANDLED;
+	}
+
+	stat = stat & BATTERY_CHARGER_STATUS_MASK;
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (chg->step_chg_enabled)
+		rerun_election(chg->fcc_votable);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (chg->step_chg_enabled)
+		rerun_election(chg->fcc_votable);
+
+	return IRQ_HANDLED;
+}
+
+#define STEP_SOC_REQ_MS	3000
+irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+	if (!chg->bms_psy) {
+		schedule_delayed_work(&chg->step_soc_req_work,
+				      msecs_to_jiffies(STEP_SOC_REQ_MS));
+		return IRQ_HANDLED;
+	}
+
+	rc = smblib_get_prop_batt_capacity(chg, &pval);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
+	else
+		step_charge_soc_update(chg, pval.intval);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	rerun_election(chg->fcc_votable);
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	power_supply_changed(chg->batt_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	power_supply_changed(chg->usb_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usbin_uv(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	struct storm_watch *wdata;
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+	if (!chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data)
+		return IRQ_HANDLED;
+
+	wdata = &chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data->storm_data;
+	reset_storm_count(wdata);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat;
+	bool vbus_rising;
+
+	rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT);
+	smblib_set_opt_freq_buck(chg,
+		vbus_rising ? chg->chg_freq.freq_5V :
+			chg->chg_freq.freq_removal);
+
+	/* fetch the DPDM regulator */
+	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+						"dpdm-supply", NULL)) {
+		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+		if (IS_ERR(chg->dpdm_reg)) {
+			smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
+				PTR_ERR(chg->dpdm_reg));
+			chg->dpdm_reg = NULL;
+		}
+	}
+
+	if (vbus_rising) {
+		if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+			rc = regulator_enable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
+					rc);
+		}
+	} else {
+		if (chg->wa_flags & BOOST_BACK_WA)
+			vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
+
+		if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+			rc = regulator_disable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
+					rc);
+		}
+
+		if (chg->micro_usb_mode) {
+			smblib_update_usb_type(chg);
+			extcon_set_cable_state_(chg->extcon, EXTCON_USB, false);
+			smblib_uusb_removal(chg);
+		}
+	}
+
+	power_supply_changed(chg->usb_psy);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n",
+		irq_data->name, vbus_rising ? "attached" : "detached");
+	return IRQ_HANDLED;
+}
+
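+/*
+ * In parallel-master mode an ICL change defers the rebalancing work by
+ * ICL_CHANGE_DELAY_MS, unless AICL is already done or the settled current
+ * already matches the effective ICL vote, in which case it runs immediately.
+ */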
+#define USB_WEAK_INPUT_UA	1400000
+#define ICL_CHANGE_DELAY_MS	1000
+irqreturn_t smblib_handle_icl_change(int irq, void *data)
+{
+	u8 stat;
+	int rc, settled_ua, delay = ICL_CHANGE_DELAY_MS;
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	if (chg->mode == PARALLEL_MASTER) {
+		rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
+					rc);
+			return IRQ_HANDLED;
+		}
+
+		rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+				&settled_ua);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+			return IRQ_HANDLED;
+		}
+
+		/* If AICL settled then schedule work now */
+		if ((settled_ua == get_effective_result(chg->usb_icl_votable))
+				|| (stat & AICL_DONE_BIT))
+			delay = 0;
+
+		schedule_delayed_work(&chg->icl_change_work,
+						msecs_to_jiffies(delay));
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smblib_handle_slow_plugin_timeout(struct smb_charger *chg,
+					      bool rising)
+{
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: slow-plugin-timeout %s\n",
+		   rising ? "rising" : "falling");
+}
+
+static void smblib_handle_sdp_enumeration_done(struct smb_charger *chg,
+					       bool rising)
+{
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: sdp-enumeration-done %s\n",
+		   rising ? "rising" : "falling");
+}
+
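+/*
+ * The buck switching frequency is retuned to the negotiated adapter voltage.
+ * For QC 3.0 the pulse count is translated into a voltage band using the
+ * 200 mV-per-pulse step: 5 pulses ~ 6 V, 20 pulses ~ 9 V, 35 pulses ~ 12 V.
+ */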
+#define QC3_PULSES_FOR_6V	5
+#define QC3_PULSES_FOR_9V	20
+#define QC3_PULSES_FOR_12V	35
+static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+	int pulses;
+
+	power_supply_changed(chg->usb_main_psy);
+	if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP) {
+		rc = smblib_read(chg, QC_CHANGE_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_CHANGE_STATUS rc=%d\n", rc);
+			return;
+		}
+
+		switch (stat & QC_2P0_STATUS_MASK) {
+		case QC_5V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_5V);
+			break;
+		case QC_9V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_9V);
+			break;
+		case QC_12V_BIT:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_12V);
+			break;
+		default:
+			smblib_set_opt_freq_buck(chg,
+					chg->chg_freq.freq_removal);
+			break;
+		}
+	}
+
+	if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_HVDCP_3) {
+		rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't read QC_PULSE_COUNT rc=%d\n", rc);
+			return;
+		}
+		pulses = (stat & QC_PULSE_COUNT_MASK);
+
+		if (pulses < QC3_PULSES_FOR_6V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_5V);
+		else if (pulses < QC3_PULSES_FOR_9V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_6V_8V);
+		else if (pulses < QC3_PULSES_FOR_12V)
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_9V);
+		else
+			smblib_set_opt_freq_buck(chg,
+				chg->chg_freq.freq_12V);
+	}
+}
+
+/* triggers when HVDCP 3.0 authentication has finished */
+static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
+					      bool rising)
+{
+	const struct apsd_result *apsd_result;
+	int rc;
+
+	if (!rising)
+		return;
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/*
+		 * Disable AUTH_IRQ_EN_CFG_BIT to receive adapter voltage
+		 * change interrupt.
+		 */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, 0);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable QC auth setting rc=%d\n", rc);
+	}
+
+	if (chg->mode == PARALLEL_MASTER)
+		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
+
+	/* QC authentication done, parallel charger can be enabled now */
+	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
+
+	/* the APSD done handler will set the USB supply type */
+	apsd_result = smblib_get_apsd_result(chg);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
+		   apsd_result->name);
+}
+
+static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
+					      bool rising, bool qc_charger)
+{
+	const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
+
+	/* Hold off PD only until hvdcp 2.0 detection timeout */
+	if (rising) {
+		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+								false, 0);
+		if (get_effective_result(chg->pd_disallowed_votable_indirect))
+			/* could be a legacy cable, try doing hvdcp */
+			try_rerun_apsd_for_hvdcp(chg);
+
+		/*
+		 * HVDCP detection timeout done
+		 * If adapter is not QC2.0/QC3.0 - it is a plain old DCP.
+		 */
+		if (!qc_charger && (apsd_result->bit & DCP_CHARGER_BIT))
+			/* enforce DCP ICL if specified */
+			vote(chg->usb_icl_votable, DCP_VOTER,
+				chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua);
+		/*
+		 * If adapter is not QC2.0/QC3.0 remove vote for parallel
+		 * disable.
+		 * Otherwise if adapter is QC2.0/QC3.0 wait for authentication
+		 * to complete.
+		 */
+		if (!qc_charger)
+			vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
+					false, 0);
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n",
+		   rising ? "rising" : "falling");
+}
+
+/* triggers when HVDCP is detected */
+static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
+					    bool rising)
+{
+	if (!rising)
+		return;
+
+	/* the APSD done handler will set the USB supply type */
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-detect-done %s\n",
+		   rising ? "rising" : "falling");
+}
+
+#define HVDCP_DET_MS 2500
+static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
+{
+	const struct apsd_result *apsd_result;
+
+	if (!rising)
+		return;
+
+	apsd_result = smblib_update_usb_type(chg);
+	switch (apsd_result->bit) {
+	case SDP_CHARGER_BIT:
+	case CDP_CHARGER_BIT:
+		if (chg->micro_usb_mode)
+			extcon_set_cable_state_(chg->extcon, EXTCON_USB,
+					true);
+		/* if not DCP then no hvdcp timeout happens. Enable pd here */
+		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+				false, 0);
+		break;
+	case OCP_CHARGER_BIT:
+	case FLOAT_CHARGER_BIT:
+		/*
+		 * if not DCP then no hvdcp timeout happens. Enable
+		 * pd/parallel here.
+		 */
+		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+				false, 0);
+		vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
+		break;
+	case DCP_CHARGER_BIT:
+		if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT)
+			schedule_delayed_work(&chg->hvdcp_detect_work,
+					      msecs_to_jiffies(HVDCP_DET_MS));
+		break;
+	default:
+		break;
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: apsd-done rising; %s detected\n",
+		   apsd_result->name);
+}
+
+irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc = 0;
+	u8 stat;
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+
+	smblib_handle_apsd_done(chg,
+		(bool)(stat & APSD_DTC_STATUS_DONE_BIT));
+
+	smblib_handle_hvdcp_detect_done(chg,
+		(bool)(stat & QC_CHARGER_BIT));
+
+	smblib_handle_hvdcp_check_timeout(chg,
+		(bool)(stat & HVDCP_CHECK_TIMEOUT_BIT),
+		(bool)(stat & QC_CHARGER_BIT));
+
+	smblib_handle_hvdcp_3p0_auth_done(chg,
+		(bool)(stat & QC_AUTH_DONE_STATUS_BIT));
+
+	smblib_handle_sdp_enumeration_done(chg,
+		(bool)(stat & ENUMERATION_DONE_BIT));
+
+	smblib_handle_slow_plugin_timeout(chg,
+		(bool)(stat & SLOW_PLUGIN_TIMEOUT_BIT));
+
+	smblib_hvdcp_adaptive_voltage_change(chg);
+
+	power_supply_changed(chg->usb_psy);
+
+	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+	smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+
+	return IRQ_HANDLED;
+}
+
+static void typec_source_removal(struct smb_charger *chg)
+{
+	int rc;
+
+	/* reset both usbin current and voltage votes */
+	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+
+	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		/* re-enable AUTH_IRQ_EN_CFG_BIT */
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg,
+				"Couldn't enable QC auth setting rc=%d\n", rc);
+	}
+
+	/* reconfigure allowed voltage for HVDCP */
+	rc = smblib_set_adapter_allowance(chg,
+			USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+			rc);
+
+	chg->voltage_min_uv = MICRO_5V;
+	chg->voltage_max_uv = MICRO_5V;
+
+	/* clear USB ICL vote for PD_VOTER */
+	rc = vote(chg->usb_icl_votable, PD_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't un-vote PD from USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for USB_PSY_VOTER */
+	rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote USB_PSY from USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for DCP_VOTER */
+	rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
+
+	/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
+	rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	if (rc < 0)
+		smblib_err(chg,
+			"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
+			rc);
+}
+
+static void typec_source_insertion(struct smb_charger *chg)
+{
+}
+
+static void typec_sink_insertion(struct smb_charger *chg)
+{
+	/*
+	 * When a sink is inserted we should not wait on the hvdcp timeout
+	 * to enable pd.
+	 */
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+			false, 0);
+}
+
+static void typec_sink_removal(struct smb_charger *chg)
+{
+	smblib_set_charge_param(chg, &chg->param.freq_boost,
+			chg->chg_freq.freq_above_otg_threshold);
+	chg->boost_current_ua = 0;
+}
+
+static void smblib_handle_typec_removal(struct smb_charger *chg)
+{
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
+	vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER, true, 0);
+	vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
+	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+
+	/* reset votes from vbus_cc_short */
+	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+			true, 0);
+	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
+			true, 0);
+	/*
+	 * cable could be removed during hard reset, remove its vote to
+	 * disable apsd
+	 */
+	vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
+
+	chg->vconn_attempts = 0;
+	chg->otg_attempts = 0;
+	chg->pulse_cnt = 0;
+	chg->usb_icl_delta_ua = 0;
+
+	chg->usb_ever_removed = true;
+
+	smblib_update_usb_type(chg);
+
+	typec_source_removal(chg);
+	typec_sink_removal(chg);
+}
+
+static void smblib_handle_typec_insertion(struct smb_charger *chg,
+		bool sink_attached, bool legacy_cable)
+{
+	int rp;
+	bool vbus_cc_short = false;
+	bool valid_legacy_cable;
+
+	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
+
+	if (sink_attached) {
+		typec_source_removal(chg);
+		typec_sink_insertion(chg);
+	} else {
+		typec_source_insertion(chg);
+		typec_sink_removal(chg);
+	}
+
+	valid_legacy_cable = legacy_cable &&
+		(chg->usb_ever_removed || !smblib_sysok_reason_usbin(chg));
+	vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
+			valid_legacy_cable, 0);
+
+	if (valid_legacy_cable) {
+		rp = smblib_get_prop_ufp_mode(chg);
+		if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
+				|| rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
+			vbus_cc_short = true;
+			smblib_err(chg, "Disabling PD and HVDCP, VBUS-CC shorted, rp = %d found\n",
+					rp);
+		}
+	}
+
+	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+			vbus_cc_short, 0);
+	vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER,
+			vbus_cc_short, 0);
+}
+
+static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
+			bool rising, bool sink_attached, bool legacy_cable)
+{
+	int rc;
+	union power_supply_propval pval = {0, };
+
+	if (rising)
+		smblib_handle_typec_insertion(chg, sink_attached, legacy_cable);
+	else
+		smblib_handle_typec_removal(chg);
+
+	rc = smblib_get_prop_typec_mode(chg, &pval);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
+
+	/*
+	 * HW BUG - after the cable is removed, a medium or high Rd reading
+	 * falls back to std. Use that transition as the signal of Type-C CC
+	 * detachment in the software WA.
+	 */
+	if (chg->cc2_sink_detach_flag == CC2_SINK_MEDIUM_HIGH
+		&& pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+
+		chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
+
+		rc = smblib_masked_write(chg,
+				TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				EXIT_SNK_BASED_ON_CC_BIT, 0);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't get prop typec mode rc=%d\n",
+				rc);
+	}
+
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
+		   rising ? "rising" : "falling",
+		   smblib_typec_mode_name[pval.intval]);
+}
+
+irqreturn_t smblib_handle_usb_typec_change_for_uusb(struct smb_charger *chg)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
+		stat, !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
+
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST,
+			!!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
+	power_supply_changed(chg->usb_psy);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat4, stat5;
+	bool debounce_done, sink_attached, legacy_cable;
+
+	if (chg->micro_usb_mode)
+		return smblib_handle_usb_typec_change_for_uusb(chg);
+
+	/* WA - skip while the PD hard-reset cc2 sink-removal workaround runs */
+	if (chg->cc2_sink_detach_flag == CC2_SINK_STD)
+		return IRQ_HANDLED;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	debounce_done = (bool)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+	sink_attached = (bool)(stat4 & UFP_DFP_MODE_STATUS_BIT);
+	legacy_cable = (bool)(stat5 & TYPEC_LEGACY_CABLE_STATUS_BIT);
+
+	smblib_handle_typec_debounce_done(chg,
+			debounce_done, sink_attached, legacy_cable);
+
+	if (stat4 & TYPEC_VBUS_ERROR_STATUS_BIT)
+		smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s vbus-error\n",
+			irq_data->name);
+
+	if (stat4 & TYPEC_VCONN_OVERCURR_STATUS_BIT)
+		schedule_work(&chg->vconn_oc_work);
+
+	power_supply_changed(chg->usb_psy);
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat4);
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_5 = 0x%02x\n", stat5);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_dc_plugin(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	power_supply_changed(chg->dc_psy);
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+
+	chg->is_hdc = true;
+	schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
+
+	return IRQ_HANDLED;
+}
+
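+/*
+ * Boost-back workaround: a storm of switcher power-ok interrupts (while the
+ * USB input is not already suspended below 25 mA and DCIN is not in use) is
+ * treated as reverse boost, and a 0 mA ICL vote suspends the input.
+ */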
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+	u8 stat;
+
+	if (!(chg->wa_flags & BOOST_BACK_WA))
+		return IRQ_HANDLED;
+
+	rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n", rc);
+		return IRQ_HANDLED;
+	}
+
+	if ((stat & USE_USBIN_BIT) &&
+			get_effective_result(chg->usb_icl_votable) < USBIN_25MA)
+		return IRQ_HANDLED;
+
+	if (stat & USE_DCIN_BIT)
+		return IRQ_HANDLED;
+
+	if (is_storming(&irq_data->storm_data)) {
+		smblib_err(chg, "Reverse boost detected: voting 0mA to suspend input\n");
+		vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_wdog_bark(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+
+	rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
+
+	return IRQ_HANDLED;
+}
+
+/***************
+ * Work Queues *
+ ***************/
+
+static void smblib_hvdcp_detect_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+					       hvdcp_detect_work.work);
+
+	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+				false, 0);
+	if (get_effective_result(chg->pd_disallowed_votable_indirect))
+		/* pd is still disabled, try hvdcp */
+		try_rerun_apsd_for_hvdcp(chg);
+	else
+		/* notify pd now that pd is allowed */
+		power_supply_changed(chg->usb_psy);
+}
+
+static void bms_update_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						bms_update_work);
+
+	smblib_suspend_on_debug_battery(chg);
+
+	if (chg->batt_psy)
+		power_supply_changed(chg->batt_psy);
+}
+
+static void step_soc_req_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						step_soc_req_work.work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	rc = smblib_get_prop_batt_capacity(chg, &pval);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
+		return;
+	}
+
+	step_charge_soc_update(chg, pval.intval);
+}
+
+static void clear_hdc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						clear_hdc_work.work);
+
+	chg->is_hdc = 0;
+}
+
+static void rdstd_cc2_detach_work(struct work_struct *work)
+{
+	int rc;
+	u8 stat;
+	struct smb_irq_data irq_data = {NULL, "cc2-removal-workaround"};
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						rdstd_cc2_detach_work);
+
+	/*
+	 * WA steps -
+	 * 1. Enable both UFP and DFP, wait for 10ms.
+	 * 2. Disable DFP, wait for 30ms.
+	 * 3. Removal detected if both TYPEC_DEBOUNCE_DONE_STATUS
+	 *    and TIMER_STAGE bits are gone, otherwise repeat all by
+	 *    work rescheduling.
+	 * Note, work will be cancelled when pd_hard_reset is 0.
+	 */
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write TYPE_C_CTRL_REG rc=%d\n", rc);
+		return;
+	}
+
+	usleep_range(10000, 11000);
+
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 UFP_EN_CMD_BIT | DFP_EN_CMD_BIT,
+				 UFP_EN_CMD_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't write TYPE_C_CTRL_REG rc=%d\n", rc);
+		return;
+	}
+
+	usleep_range(30000, 31000);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+			rc);
+		return;
+	}
+	if (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)
+		goto rerun;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg,
+			"Couldn't read TYPE_C_STATUS_5_REG rc=%d\n", rc);
+		return;
+	}
+	if (stat & TIMER_STAGE_2_BIT)
+		goto rerun;
+
+	/* Bingo, cc2 removal detected */
+	smblib_reg_block_restore(chg, cc2_detach_settings);
+	chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE;
+	irq_data.parent_data = chg;
+	smblib_handle_usb_typec_change(0, &irq_data);
+
+	return;
+
+rerun:
+	schedule_work(&chg->rdstd_cc2_detach_work);
+}
+
+static void smblib_otg_oc_exit(struct smb_charger *chg, bool success)
+{
+	int rc;
+
+	chg->otg_attempts = 0;
+	if (!success) {
+		smblib_err(chg, "OTG soft start failed\n");
+		chg->otg_en = false;
+	}
+
+	smblib_dbg(chg, PR_OTG, "enabling VBUS < 1V check\n");
+	rc = smblib_masked_write(chg, OTG_CFG_REG,
+					QUICKSTART_OTG_FASTROLESWAP_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable VBUS < 1V check rc=%d\n", rc);
+
+	if (!chg->external_vconn && chg->vconn_en) {
+		chg->vconn_attempts = 0;
+		if (success) {
+			rc = _smblib_vconn_regulator_enable(
+							chg->vconn_vreg->rdev);
+			if (rc < 0)
+				smblib_err(chg, "Couldn't enable VCONN rc=%d\n",
+									rc);
+		} else {
+			chg->vconn_en = false;
+		}
+	}
+}
+
+#define MAX_OC_FALLING_TRIES 10
+static void smblib_otg_oc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+								otg_oc_work);
+	int rc, i;
+	u8 stat;
+
+	if (!chg->vbus_vreg || !chg->vbus_vreg->rdev)
+		return;
+
+	smblib_err(chg, "over-current detected on VBUS\n");
+	mutex_lock(&chg->otg_oc_lock);
+	if (!chg->otg_en)
+		goto unlock;
+
+	smblib_dbg(chg, PR_OTG, "disabling VBUS < 1V check\n");
+	smblib_masked_write(chg, OTG_CFG_REG,
+					QUICKSTART_OTG_FASTROLESWAP_BIT,
+					QUICKSTART_OTG_FASTROLESWAP_BIT);
+
+	/*
+	 * If 500ms has passed and another over-current interrupt has not
+	 * triggered then it is likely that the software based soft start was
+	 * successful and the VBUS < 1V restriction should be re-enabled.
+	 */
+	schedule_delayed_work(&chg->otg_ss_done_work, msecs_to_jiffies(500));
+
+	rc = _smblib_vbus_regulator_disable(chg->vbus_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable VBUS rc=%d\n", rc);
+		goto unlock;
+	}
+
+	if (++chg->otg_attempts > OTG_MAX_ATTEMPTS) {
+		cancel_delayed_work_sync(&chg->otg_ss_done_work);
+		smblib_err(chg, "OTG failed to enable after %d attempts\n",
+			   chg->otg_attempts - 1);
+		smblib_otg_oc_exit(chg, false);
+		goto unlock;
+	}
+
+	/*
+	 * The real time status should go low within 10ms. Poll every 1-2ms to
+	 * minimize the delay when re-enabling OTG.
+	 */
+	for (i = 0; i < MAX_OC_FALLING_TRIES; ++i) {
+		usleep_range(1000, 2000);
+		rc = smblib_read(chg, OTG_BASE + INT_RT_STS_OFFSET, &stat);
+		if (rc >= 0 && !(stat & OTG_OVERCURRENT_RT_STS_BIT))
+			break;
+	}
+
+	if (i >= MAX_OC_FALLING_TRIES) {
+		cancel_delayed_work_sync(&chg->otg_ss_done_work);
+		smblib_err(chg, "OTG OC did not fall after %dms\n",
+						2 * MAX_OC_FALLING_TRIES);
+		smblib_otg_oc_exit(chg, false);
+		goto unlock;
+	}
+
+	smblib_dbg(chg, PR_OTG, "OTG OC fell after %dms\n", 2 * i + 1);
+	rc = _smblib_vbus_regulator_enable(chg->vbus_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable VBUS rc=%d\n", rc);
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+}
+
+static void smblib_vconn_oc_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+								vconn_oc_work);
+	int rc, i;
+	u8 stat;
+
+	smblib_err(chg, "over-current detected on VCONN\n");
+	if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
+		return;
+
+	mutex_lock(&chg->otg_oc_lock);
+	rc = _smblib_vconn_regulator_disable(chg->vconn_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
+		goto unlock;
+	}
+
+	if (++chg->vconn_attempts > VCONN_MAX_ATTEMPTS) {
+		smblib_err(chg, "VCONN failed to enable after %d attempts\n",
+			   chg->vconn_attempts - 1);
+		chg->vconn_en = false;
+		chg->vconn_attempts = 0;
+		goto unlock;
+	}
+
+	/*
+	 * The real time status should go low within 10ms. Poll every 1-2ms to
+	 * minimize the delay when re-enabling OTG.
+	 */
+	for (i = 0; i < MAX_OC_FALLING_TRIES; ++i) {
+		usleep_range(1000, 2000);
+		rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+		if (rc >= 0 && !(stat & TYPEC_VCONN_OVERCURR_STATUS_BIT))
+			break;
+	}
+
+	if (i >= MAX_OC_FALLING_TRIES) {
+		smblib_err(chg, "VCONN OC did not fall after %dms\n",
+						2 * MAX_OC_FALLING_TRIES);
+		chg->vconn_en = false;
+		chg->vconn_attempts = 0;
+		goto unlock;
+	}
+
+	smblib_dbg(chg, PR_OTG, "VCONN OC fell after %dms\n", 2 * i + 1);
+	if (++chg->vconn_attempts > VCONN_MAX_ATTEMPTS) {
+		smblib_err(chg, "VCONN failed to enable after %d attempts\n",
+			   chg->vconn_attempts - 1);
+		chg->vconn_en = false;
+		goto unlock;
+	}
+
+	rc = _smblib_vconn_regulator_enable(chg->vconn_vreg->rdev);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't enable VCONN rc=%d\n", rc);
+		goto unlock;
+	}
+
+unlock:
+	mutex_unlock(&chg->otg_oc_lock);
+}
+
+static void smblib_otg_ss_done_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							otg_ss_done_work.work);
+	int rc;
+	bool success = false;
+	u8 stat;
+
+	mutex_lock(&chg->otg_oc_lock);
+	rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't read OTG status rc=%d\n", rc);
+	else if (stat & BOOST_SOFTSTART_DONE_BIT)
+		success = true;
+
+	smblib_otg_oc_exit(chg, success);
+	mutex_unlock(&chg->otg_oc_lock);
+}
+
+static void smblib_icl_change_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							icl_change_work.work);
+	int rc, settled_ua;
+
+	rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+		return;
+	}
+
+	power_supply_changed(chg->usb_main_psy);
+	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
+				settled_ua >= USB_WEAK_INPUT_UA, 0);
+
+	smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
+}
+
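+/*
+ * Votable setup: FCC, FV and PL_DISABLE are owned elsewhere and only looked
+ * up here, returning -EPROBE_DEFER until their owners have registered them.
+ * The remaining votables are created by this library, and the initial
+ * PL_DISABLE votes keep the parallel charger off until it is explicitly
+ * enabled.
+ */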
+static int smblib_create_votables(struct smb_charger *chg)
+{
+	int rc = 0;
+
+	chg->fcc_votable = find_votable("FCC");
+	if (!chg->fcc_votable) {
+		rc = -EPROBE_DEFER;
+		return rc;
+	}
+
+	chg->fv_votable = find_votable("FV");
+	if (!chg->fv_votable) {
+		rc = -EPROBE_DEFER;
+		return rc;
+	}
+
+	chg->pl_disable_votable = find_votable("PL_DISABLE");
+	if (!chg->pl_disable_votable) {
+		rc = -EPROBE_DEFER;
+		return rc;
+	}
+	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+
+	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
+					smblib_dc_suspend_vote_callback,
+					chg);
+	if (IS_ERR(chg->dc_suspend_votable)) {
+		rc = PTR_ERR(chg->dc_suspend_votable);
+		return rc;
+	}
+
+	chg->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
+					smblib_usb_icl_vote_callback,
+					chg);
+	if (IS_ERR(chg->usb_icl_votable)) {
+		rc = PTR_ERR(chg->usb_icl_votable);
+		return rc;
+	}
+
+	chg->dc_icl_votable = create_votable("DC_ICL", VOTE_MIN,
+					smblib_dc_icl_vote_callback,
+					chg);
+	if (IS_ERR(chg->dc_icl_votable)) {
+		rc = PTR_ERR(chg->dc_icl_votable);
+		return rc;
+	}
+
+	chg->pd_disallowed_votable_indirect
+		= create_votable("PD_DISALLOWED_INDIRECT", VOTE_SET_ANY,
+			smblib_pd_disallowed_votable_indirect_callback, chg);
+	if (IS_ERR(chg->pd_disallowed_votable_indirect)) {
+		rc = PTR_ERR(chg->pd_disallowed_votable_indirect);
+		return rc;
+	}
+
+	chg->pd_allowed_votable = create_votable("PD_ALLOWED",
+					VOTE_SET_ANY, NULL, NULL);
+	if (IS_ERR(chg->pd_allowed_votable)) {
+		rc = PTR_ERR(chg->pd_allowed_votable);
+		return rc;
+	}
+
+	chg->awake_votable = create_votable("AWAKE", VOTE_SET_ANY,
+					smblib_awake_vote_callback,
+					chg);
+	if (IS_ERR(chg->awake_votable)) {
+		rc = PTR_ERR(chg->awake_votable);
+		return rc;
+	}
+
+	chg->chg_disable_votable = create_votable("CHG_DISABLE", VOTE_SET_ANY,
+					smblib_chg_disable_vote_callback,
+					chg);
+	if (IS_ERR(chg->chg_disable_votable)) {
+		rc = PTR_ERR(chg->chg_disable_votable);
+		return rc;
+	}
+
+	chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+					VOTE_SET_ANY,
+					smblib_pl_enable_indirect_vote_callback,
+					chg);
+	if (IS_ERR(chg->pl_enable_votable_indirect)) {
+		rc = PTR_ERR(chg->pl_enable_votable_indirect);
+		return rc;
+	}
+
+	chg->hvdcp_disable_votable_indirect = create_votable(
+				"HVDCP_DISABLE_INDIRECT",
+				VOTE_SET_ANY,
+				smblib_hvdcp_disable_indirect_vote_callback,
+				chg);
+	if (IS_ERR(chg->hvdcp_disable_votable_indirect)) {
+		rc = PTR_ERR(chg->hvdcp_disable_votable_indirect);
+		return rc;
+	}
+
+	chg->hvdcp_enable_votable = create_votable("HVDCP_ENABLE",
+					VOTE_SET_ANY,
+					smblib_hvdcp_enable_vote_callback,
+					chg);
+	if (IS_ERR(chg->hvdcp_enable_votable)) {
+		rc = PTR_ERR(chg->hvdcp_enable_votable);
+		return rc;
+	}
+
+	chg->apsd_disable_votable = create_votable("APSD_DISABLE",
+					VOTE_SET_ANY,
+					smblib_apsd_disable_vote_callback,
+					chg);
+	if (IS_ERR(chg->apsd_disable_votable)) {
+		rc = PTR_ERR(chg->apsd_disable_votable);
+		return rc;
+	}
+
+	chg->hvdcp_hw_inov_dis_votable = create_votable("HVDCP_HW_INOV_DIS",
+					VOTE_SET_ANY,
+					smblib_hvdcp_hw_inov_dis_vote_callback,
+					chg);
+	if (IS_ERR(chg->hvdcp_hw_inov_dis_votable)) {
+		rc = PTR_ERR(chg->hvdcp_hw_inov_dis_votable);
+		return rc;
+	}
+
+	return rc;
+}
+
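+/*
+ * Destroy only the votables created by smblib_create_votables(); votables
+ * obtained through find_votable() are owned and destroyed by their creators.
+ */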
+static void smblib_destroy_votables(struct smb_charger *chg)
+{
+	if (chg->dc_suspend_votable)
+		destroy_votable(chg->dc_suspend_votable);
+	if (chg->usb_icl_votable)
+		destroy_votable(chg->usb_icl_votable);
+	if (chg->dc_icl_votable)
+		destroy_votable(chg->dc_icl_votable);
+	if (chg->pd_disallowed_votable_indirect)
+		destroy_votable(chg->pd_disallowed_votable_indirect);
+	if (chg->pd_allowed_votable)
+		destroy_votable(chg->pd_allowed_votable);
+	if (chg->awake_votable)
+		destroy_votable(chg->awake_votable);
+	if (chg->chg_disable_votable)
+		destroy_votable(chg->chg_disable_votable);
+	if (chg->pl_enable_votable_indirect)
+		destroy_votable(chg->pl_enable_votable_indirect);
+	if (chg->hvdcp_disable_votable_indirect)
+		destroy_votable(chg->hvdcp_disable_votable_indirect);
+	if (chg->hvdcp_enable_votable)
+		destroy_votable(chg->hvdcp_enable_votable);
+	if (chg->apsd_disable_votable)
+		destroy_votable(chg->apsd_disable_votable);
+	if (chg->hvdcp_hw_inov_dis_votable)
+		destroy_votable(chg->hvdcp_hw_inov_dis_votable);
+}
+
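+/* Release whichever IIO channels were successfully acquired. */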
+static void smblib_iio_deinit(struct smb_charger *chg)
+{
+	if (!IS_ERR_OR_NULL(chg->iio.temp_chan))
+		iio_channel_release(chg->iio.temp_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.temp_max_chan))
+		iio_channel_release(chg->iio.temp_max_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.usbin_i_chan))
+		iio_channel_release(chg->iio.usbin_i_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
+		iio_channel_release(chg->iio.usbin_v_chan);
+	if (!IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+		iio_channel_release(chg->iio.batt_i_chan);
+}
+
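+/*
+ * Common initialization for both roles: locks and work items are set up
+ * unconditionally, while votables, the power-supply notifier and the
+ * bms/parallel power-supply lookups are performed only for the master.
+ */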
+int smblib_init(struct smb_charger *chg)
+{
+	int rc = 0;
+
+	mutex_init(&chg->write_lock);
+	mutex_init(&chg->otg_oc_lock);
+	INIT_WORK(&chg->bms_update_work, bms_update_work);
+	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
+	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
+	INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
+	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
+	INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
+	INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
+	INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
+	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
+	chg->fake_capacity = -EINVAL;
+
+	switch (chg->mode) {
+	case PARALLEL_MASTER:
+		chg->qnovo_fcc_ua = -EINVAL;
+		chg->qnovo_fv_uv = -EINVAL;
+		rc = smblib_create_votables(chg);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't create votables rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = smblib_register_notifier(chg);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't register notifier rc=%d\n", rc);
+			return rc;
+		}
+
+		chg->bms_psy = power_supply_get_by_name("bms");
+		chg->pl.psy = power_supply_get_by_name("parallel");
+		break;
+	case PARALLEL_SLAVE:
+		break;
+	default:
+		smblib_err(chg, "Unsupported mode %d\n", chg->mode);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int smblib_deinit(struct smb_charger *chg)
+{
+	switch (chg->mode) {
+	case PARALLEL_MASTER:
+		power_supply_unreg_notifier(&chg->nb);
+		smblib_destroy_votables(chg);
+		break;
+	case PARALLEL_SLAVE:
+		break;
+	default:
+		smblib_err(chg, "Unsupported mode %d\n", chg->mode);
+		return -EINVAL;
+	}
+
+	smblib_iio_deinit(chg);
+
+	return 0;
+}
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
new file mode 100644
index 0000000..21ccd3c
--- /dev/null
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -0,0 +1,487 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SMB2_CHARGER_H
+#define __SMB2_CHARGER_H
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/extcon.h>
+#include "storm-watch.h"
+
+enum print_reason {
+	PR_INTERRUPT	= BIT(0),
+	PR_REGISTER	= BIT(1),
+	PR_MISC		= BIT(2),
+	PR_PARALLEL	= BIT(3),
+	PR_OTG		= BIT(4),
+};
+
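+/*
+ * Voter identifiers: each string names a client whose request is aggregated
+ * by the corresponding votable; the winning value is handed to that
+ * votable's callback.
+ */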
+#define DEFAULT_VOTER			"DEFAULT_VOTER"
+#define USER_VOTER			"USER_VOTER"
+#define PD_VOTER			"PD_VOTER"
+#define DCP_VOTER			"DCP_VOTER"
+#define PL_USBIN_USBIN_VOTER		"PL_USBIN_USBIN_VOTER"
+#define USB_PSY_VOTER			"USB_PSY_VOTER"
+#define PL_TAPER_WORK_RUNNING_VOTER	"PL_TAPER_WORK_RUNNING_VOTER"
+#define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER			"USBIN_I_VOTER"
+#define USBIN_V_VOTER			"USBIN_V_VOTER"
+#define CHG_STATE_VOTER			"CHG_STATE_VOTER"
+#define TYPEC_SRC_VOTER			"TYPEC_SRC_VOTER"
+#define TAPER_END_VOTER			"TAPER_END_VOTER"
+#define THERMAL_DAEMON_VOTER		"THERMAL_DAEMON_VOTER"
+#define CC_DETACHED_VOTER		"CC_DETACHED_VOTER"
+#define HVDCP_TIMEOUT_VOTER		"HVDCP_TIMEOUT_VOTER"
+#define PD_DISALLOWED_INDIRECT_VOTER	"PD_DISALLOWED_INDIRECT_VOTER"
+#define PD_HARD_RESET_VOTER		"PD_HARD_RESET_VOTER"
+#define VBUS_CC_SHORT_VOTER		"VBUS_CC_SHORT_VOTER"
+#define LEGACY_CABLE_VOTER		"LEGACY_CABLE_VOTER"
+#define PD_INACTIVE_VOTER		"PD_INACTIVE_VOTER"
+#define BOOST_BACK_VOTER		"BOOST_BACK_VOTER"
+#define HVDCP_INDIRECT_VOTER		"HVDCP_INDIRECT_VOTER"
+#define MICRO_USB_VOTER			"MICRO_USB_VOTER"
+#define DEBUG_BOARD_VOTER		"DEBUG_BOARD_VOTER"
+#define PD_SUSPEND_SUPPORTED_VOTER	"PD_SUSPEND_SUPPORTED_VOTER"
+#define PL_DELAY_HVDCP_VOTER		"PL_DELAY_HVDCP_VOTER"
+#define CTM_VOTER			"CTM_VOTER"
+#define SW_QC3_VOTER			"SW_QC3_VOTER"
+#define AICL_RERUN_VOTER		"AICL_RERUN_VOTER"
+
+#define VCONN_MAX_ATTEMPTS	3
+#define OTG_MAX_ATTEMPTS	3
+
+enum smb_mode {
+	PARALLEL_MASTER = 0,
+	PARALLEL_SLAVE,
+	NUM_MODES,
+};
+
+enum cc2_sink_type {
+	CC2_SINK_NONE = 0,
+	CC2_SINK_STD,
+	CC2_SINK_MEDIUM_HIGH,
+	CC2_SINK_WA_DONE,
+};
+
+enum {
+	QC_CHARGER_DETECTION_WA_BIT	= BIT(0),
+	BOOST_BACK_WA			= BIT(1),
+	TYPEC_CC2_REMOVAL_WA_BIT	= BIT(2),
+	QC_AUTH_INTERRUPT_WA_BIT	= BIT(3),
+};
+
+enum smb_irq_index {
+	CHG_ERROR_IRQ = 0,
+	CHG_STATE_CHANGE_IRQ,
+	STEP_CHG_STATE_CHANGE_IRQ,
+	STEP_CHG_SOC_UPDATE_FAIL_IRQ,
+	STEP_CHG_SOC_UPDATE_REQ_IRQ,
+	OTG_FAIL_IRQ,
+	OTG_OVERCURRENT_IRQ,
+	OTG_OC_DIS_SW_STS_IRQ,
+	TESTMODE_CHANGE_DET_IRQ,
+	BATT_TEMP_IRQ,
+	BATT_OCP_IRQ,
+	BATT_OV_IRQ,
+	BATT_LOW_IRQ,
+	BATT_THERM_ID_MISS_IRQ,
+	BATT_TERM_MISS_IRQ,
+	USBIN_COLLAPSE_IRQ,
+	USBIN_LT_3P6V_IRQ,
+	USBIN_UV_IRQ,
+	USBIN_OV_IRQ,
+	USBIN_PLUGIN_IRQ,
+	USBIN_SRC_CHANGE_IRQ,
+	USBIN_ICL_CHANGE_IRQ,
+	TYPE_C_CHANGE_IRQ,
+	DCIN_COLLAPSE_IRQ,
+	DCIN_LT_3P6V_IRQ,
+	DCIN_UV_IRQ,
+	DCIN_OV_IRQ,
+	DCIN_PLUGIN_IRQ,
+	DIV2_EN_DG_IRQ,
+	DCIN_ICL_CHANGE_IRQ,
+	WDOG_SNARL_IRQ,
+	WDOG_BARK_IRQ,
+	AICL_FAIL_IRQ,
+	AICL_DONE_IRQ,
+	HIGH_DUTY_CYCLE_IRQ,
+	INPUT_CURRENT_LIMIT_IRQ,
+	TEMPERATURE_CHANGE_IRQ,
+	SWITCH_POWER_OK_IRQ,
+	SMB_IRQ_MAX,
+};
+
+struct smb_irq_info {
+	const char			*name;
+	const irq_handler_t		handler;
+	const bool			wake;
+	const struct storm_watch	storm_data;
+	struct smb_irq_data		*irq_data;
+	int				irq;
+};
+
+static const unsigned int smblib_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_NONE,
+};
+
+struct smb_regulator {
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+};
+
+struct smb_irq_data {
+	void			*parent_data;
+	const char		*name;
+	struct storm_watch	storm_data;
+};
+
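+/*
+ * Describes one hardware charge parameter: the register that holds it, its
+ * range and step size in real units, and optional conversion hooks for
+ * registers that do not follow a simple linear mapping.
+ */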
+struct smb_chg_param {
+	const char	*name;
+	u16		reg;
+	int		min_u;
+	int		max_u;
+	int		step_u;
+	int		(*get_proc)(struct smb_chg_param *param,
+				    u8 val_raw);
+	int		(*set_proc)(struct smb_chg_param *param,
+				    int val_u,
+				    u8 *val_raw);
+};
+
+struct smb_chg_freq {
+	unsigned int		freq_5V;
+	unsigned int		freq_6V_8V;
+	unsigned int		freq_9V;
+	unsigned int		freq_12V;
+	unsigned int		freq_removal;
+	unsigned int		freq_below_otg_threshold;
+	unsigned int		freq_above_otg_threshold;
+};
+
+struct smb_params {
+	struct smb_chg_param	fcc;
+	struct smb_chg_param	fv;
+	struct smb_chg_param	usb_icl;
+	struct smb_chg_param	icl_stat;
+	struct smb_chg_param	otg_cl;
+	struct smb_chg_param	dc_icl;
+	struct smb_chg_param	dc_icl_pt_lv;
+	struct smb_chg_param	dc_icl_pt_hv;
+	struct smb_chg_param	dc_icl_div2_lv;
+	struct smb_chg_param	dc_icl_div2_mid_lv;
+	struct smb_chg_param	dc_icl_div2_mid_hv;
+	struct smb_chg_param	dc_icl_div2_hv;
+	struct smb_chg_param	jeita_cc_comp;
+	struct smb_chg_param	step_soc_threshold[4];
+	struct smb_chg_param	step_soc;
+	struct smb_chg_param	step_cc_delta[5];
+	struct smb_chg_param	freq_buck;
+	struct smb_chg_param	freq_boost;
+};
+
+struct parallel_params {
+	struct power_supply	*psy;
+};
+
+struct smb_iio {
+	struct iio_channel	*temp_chan;
+	struct iio_channel	*temp_max_chan;
+	struct iio_channel	*usbin_i_chan;
+	struct iio_channel	*usbin_v_chan;
+	struct iio_channel	*batt_i_chan;
+	struct iio_channel	*connector_temp_chan;
+	struct iio_channel	*connector_temp_thr1_chan;
+	struct iio_channel	*connector_temp_thr2_chan;
+	struct iio_channel	*connector_temp_thr3_chan;
+};
+
+struct reg_info {
+	u16		reg;
+	u8		mask;
+	u8		val;
+	u8		bak;
+	const char	*desc;
+};
+
+struct smb_charger {
+	struct device		*dev;
+	char			*name;
+	struct regmap		*regmap;
+	struct smb_irq_info	*irq_info;
+	struct smb_params	param;
+	struct smb_iio		iio;
+	int			*debug_mask;
+	enum smb_mode		mode;
+	bool			external_vconn;
+	struct smb_chg_freq	chg_freq;
+	int			smb_version;
+
+	/* locks */
+	struct mutex		write_lock;
+	struct mutex		ps_change_lock;
+	struct mutex		otg_oc_lock;
+
+	/* power supplies */
+	struct power_supply		*batt_psy;
+	struct power_supply		*usb_psy;
+	struct power_supply		*dc_psy;
+	struct power_supply		*bms_psy;
+	struct power_supply_desc	usb_psy_desc;
+	struct power_supply		*usb_main_psy;
+
+	/* notifiers */
+	struct notifier_block	nb;
+
+	/* parallel charging */
+	struct parallel_params	pl;
+
+	/* regulators */
+	struct smb_regulator	*vbus_vreg;
+	struct smb_regulator	*vconn_vreg;
+	struct regulator	*dpdm_reg;
+
+	/* votables */
+	struct votable		*dc_suspend_votable;
+	struct votable		*fcc_votable;
+	struct votable		*fv_votable;
+	struct votable		*usb_icl_votable;
+	struct votable		*dc_icl_votable;
+	struct votable		*pd_disallowed_votable_indirect;
+	struct votable		*pd_allowed_votable;
+	struct votable		*awake_votable;
+	struct votable		*pl_disable_votable;
+	struct votable		*chg_disable_votable;
+	struct votable		*pl_enable_votable_indirect;
+	struct votable		*hvdcp_disable_votable_indirect;
+	struct votable		*hvdcp_enable_votable;
+	struct votable		*apsd_disable_votable;
+	struct votable		*hvdcp_hw_inov_dis_votable;
+
+	/* work */
+	struct work_struct	bms_update_work;
+	struct work_struct	rdstd_cc2_detach_work;
+	struct delayed_work	hvdcp_detect_work;
+	struct delayed_work	ps_change_timeout_work;
+	struct delayed_work	step_soc_req_work;
+	struct delayed_work	clear_hdc_work;
+	struct work_struct	otg_oc_work;
+	struct work_struct	vconn_oc_work;
+	struct delayed_work	otg_ss_done_work;
+	struct delayed_work	icl_change_work;
+
+	/* cached status */
+	int			voltage_min_uv;
+	int			voltage_max_uv;
+	int			pd_active;
+	bool			system_suspend_supported;
+	int			boost_threshold_ua;
+	int			system_temp_level;
+	int			thermal_levels;
+	int			*thermal_mitigation;
+	int			dcp_icl_ua;
+	int			fake_capacity;
+	bool			step_chg_enabled;
+	bool			is_hdc;
+	bool			chg_done;
+	bool			micro_usb_mode;
+	bool			otg_en;
+	bool			vconn_en;
+	bool			suspend_input_on_debug_batt;
+	int			otg_attempts;
+	int			vconn_attempts;
+	int			default_icl_ua;
+
+	/* workaround flag */
+	u32			wa_flags;
+	enum cc2_sink_type	cc2_sink_detach_flag;
+	int			boost_current_ua;
+
+	/* extcon for VBUS / ID notification to USB for uUSB */
+	struct extcon_dev	*extcon;
+	bool			usb_ever_removed;
+
+	int			icl_reduction_ua;
+
+	/* qnovo */
+	int			qnovo_fcc_ua;
+	int			qnovo_fv_uv;
+	int			usb_icl_delta_ua;
+	int			pulse_cnt;
+};
+
+int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
+int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val);
+int smblib_write(struct smb_charger *chg, u16 addr, u8 val);
+
+int smblib_get_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int *val_u);
+int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend);
+
+int smblib_enable_charging(struct smb_charger *chg, bool enable);
+int smblib_set_charge_param(struct smb_charger *chg,
+			    struct smb_chg_param *param, int val_u);
+int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend);
+int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend);
+
+int smblib_mapping_soc_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw);
+int smblib_mapping_cc_delta_to_field_value(struct smb_chg_param *param,
+					   u8 val_raw);
+int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
+					     int val_u, u8 *val_raw);
+int smblib_set_chg_freq(struct smb_chg_param *param,
+				int val_u, u8 *val_raw);
+
+int smblib_vbus_regulator_enable(struct regulator_dev *rdev);
+int smblib_vbus_regulator_disable(struct regulator_dev *rdev);
+int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev);
+
+int smblib_vconn_regulator_enable(struct regulator_dev *rdev);
+int smblib_vconn_regulator_disable(struct regulator_dev *rdev);
+int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev);
+
+irqreturn_t smblib_handle_debug(int irq, void *data);
+irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data);
+irqreturn_t smblib_handle_chg_state_change(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data);
+irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data);
+irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data);
+irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data);
+irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data);
+irqreturn_t smblib_handle_usbin_uv(int irq, void *data);
+irqreturn_t smblib_handle_usb_plugin(int irq, void *data);
+irqreturn_t smblib_handle_usb_source_change(int irq, void *data);
+irqreturn_t smblib_handle_icl_change(int irq, void *data);
+irqreturn_t smblib_handle_usb_typec_change(int irq, void *data);
+irqreturn_t smblib_handle_dc_plugin(int irq, void *data);
+irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data);
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data);
+irqreturn_t smblib_handle_wdog_bark(int irq, void *data);
+
+int smblib_get_prop_input_suspend(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_capacity(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_status(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_health(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_current_limited(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_voltage_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_batt_temp(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_step_chg_step(struct smb_charger *chg,
+				union power_supply_propval *val);
+
+int smblib_set_prop_input_suspend(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_batt_capacity(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+				const union power_supply_propval *val);
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+
+int smblib_get_prop_usb_present(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_online(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_suspend(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_pd_current_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_current_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_usb_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_typec_mode(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_typec_power_role(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_pd_allowed(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_current_settled(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_input_voltage_settled(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_get_pe_start(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_get_prop_charger_temp(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_get_prop_die_health(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_usb_current_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_boost_current(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_typec_power_role(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_pd_active(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+				const union power_supply_propval *val);
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_ship_mode(struct smb_charger *chg,
+				const union power_supply_propval *val);
+void smblib_suspend_on_debug_battery(struct smb_charger *chg);
+int smblib_rerun_apsd_if_required(struct smb_charger *chg);
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+			       union power_supply_propval *val);
+int smblib_icl_override(struct smb_charger *chg, bool override);
+int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua);
+int smblib_dp_dm(struct smb_charger *chg, int val);
+int smblib_rerun_aicl(struct smb_charger *chg);
+
+int smblib_init(struct smb_charger *chg);
+int smblib_deinit(struct smb_charger *chg);
+#endif /* __SMB2_CHARGER_H */
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
new file mode 100644
index 0000000..54b6b38
--- /dev/null
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -0,0 +1,1024 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SMB2_CHARGER_REG_H
+#define __SMB2_CHARGER_REG_H
+
+#include <linux/bitops.h>
+
+#define CHGR_BASE	0x1000
+#define OTG_BASE	0x1100
+#define BATIF_BASE	0x1200
+#define USBIN_BASE	0x1300
+#define DCIN_BASE	0x1400
+#define MISC_BASE	0x1600
+#define CHGR_FREQ_BASE	0x1900
+
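+/*
+ * Registers below are expressed as a peripheral base plus an offset; the
+ * BIT()/GENMASK() definitions that follow each register describe its fields.
+ */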
+#define PERPH_TYPE_OFFSET		0x04
+#define TYPE_MASK			GENMASK(7, 0)
+#define PERPH_SUBTYPE_OFFSET		0x05
+#define SUBTYPE_MASK			GENMASK(7, 0)
+#define INT_RT_STS_OFFSET		0x10
+
+/* CHGR Peripheral Registers */
+#define BATTERY_CHARGER_STATUS_1_REG	(CHGR_BASE + 0x06)
+#define BVR_INITIAL_RAMP_BIT		BIT(7)
+#define CC_SOFT_TERMINATE_BIT		BIT(6)
+#define STEP_CHARGING_STATUS_SHIFT	3
+#define STEP_CHARGING_STATUS_MASK	GENMASK(5, 3)
+#define BATTERY_CHARGER_STATUS_MASK	GENMASK(2, 0)
+enum {
+	TRICKLE_CHARGE = 0,
+	PRE_CHARGE,
+	FAST_CHARGE,
+	FULLON_CHARGE,
+	TAPER_CHARGE,
+	TERMINATE_CHARGE,
+	INHIBIT_CHARGE,
+	DISABLE_CHARGE,
+};
+
+#define BATTERY_CHARGER_STATUS_2_REG			(CHGR_BASE + 0x07)
+#define INPUT_CURRENT_LIMITED_BIT			BIT(7)
+#define CHARGER_ERROR_STATUS_SFT_EXPIRE_BIT		BIT(6)
+#define CHARGER_ERROR_STATUS_BAT_OV_BIT			BIT(5)
+#define CHARGER_ERROR_STATUS_BAT_TERM_MISSING_BIT	BIT(4)
+#define BAT_TEMP_STATUS_MASK				GENMASK(3, 0)
+#define BAT_TEMP_STATUS_SOFT_LIMIT_MASK			GENMASK(3, 2)
+#define BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT		BIT(3)
+#define BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT		BIT(2)
+#define BAT_TEMP_STATUS_TOO_HOT_BIT			BIT(1)
+#define BAT_TEMP_STATUS_TOO_COLD_BIT			BIT(0)
+
+#define CHG_OPTION_REG					(CHGR_BASE + 0x08)
+#define PIN_BIT						BIT(7)
+
+#define BATTERY_CHARGER_STATUS_3_REG			(CHGR_BASE + 0x09)
+#define FV_POST_JEITA_MASK				GENMASK(7, 0)
+
+#define BATTERY_CHARGER_STATUS_4_REG			(CHGR_BASE + 0x0A)
+#define CHARGE_CURRENT_POST_JEITA_MASK			GENMASK(7, 0)
+
+#define BATTERY_CHARGER_STATUS_5_REG			(CHGR_BASE + 0x0B)
+#define VALID_INPUT_POWER_SOURCE_BIT			BIT(7)
+#define DISABLE_CHARGING_BIT				BIT(6)
+#define FORCE_ZERO_CHARGE_CURRENT_BIT			BIT(5)
+#define CHARGING_ENABLE_BIT				BIT(4)
+#define TAPER_BIT					BIT(3)
+#define ENABLE_CHG_SENSORS_BIT				BIT(2)
+#define ENABLE_TAPER_SENSOR_BIT				BIT(1)
+#define TAPER_REGION_BIT				BIT(0)
+
+#define BATTERY_CHARGER_STATUS_6_REG			(CHGR_BASE + 0x0C)
+#define GF_BATT_OV_BIT					BIT(7)
+#define DROP_IN_BATTERY_VOLTAGE_REFERENCE_BIT		BIT(6)
+#define VBATT_LTET_RECHARGE_BIT				BIT(5)
+#define VBATT_GTET_INHIBIT_BIT				BIT(4)
+#define VBATT_GTET_FLOAT_VOLTAGE_BIT			BIT(3)
+#define BATT_GT_PRE_TO_FAST_BIT				BIT(2)
+#define BATT_GT_FULL_ON_BIT				BIT(1)
+#define VBATT_LT_2V_BIT					BIT(0)
+
+#define BATTERY_CHARGER_STATUS_7_REG			(CHGR_BASE + 0x0D)
+#define ENABLE_TRICKLE_BIT				BIT(7)
+#define ENABLE_PRE_CHARGING_BIT				BIT(6)
+#define ENABLE_FAST_CHARGING_BIT			BIT(5)
+#define ENABLE_FULLON_MODE_BIT				BIT(4)
+#define TOO_COLD_ADC_BIT				BIT(3)
+#define TOO_HOT_ADC_BIT					BIT(2)
+#define HOT_SL_ADC_BIT					BIT(1)
+#define COLD_SL_ADC_BIT					BIT(0)
+
+#define BATTERY_CHARGER_STATUS_8_REG			(CHGR_BASE + 0x0E)
+#define PRE_FAST_BIT					BIT(7)
+#define PRE_FULLON_BIT					BIT(6)
+#define PRE_RCHG_BIT					BIT(5)
+#define PRE_INHIBIT_BIT					BIT(4)
+#define PRE_OVRV_BIT					BIT(3)
+#define PRE_TERM_BIT					BIT(2)
+#define BAT_ID_BMISS_CMP_BIT				BIT(1)
+#define THERM_CMP_BIT					BIT(0)
+
+/* CHGR Interrupt Bits */
+#define CHGR_7_RT_STS_BIT				BIT(7)
+#define CHGR_6_RT_STS_BIT				BIT(6)
+#define FG_FVCAL_QUALIFIED_RT_STS_BIT			BIT(5)
+#define STEP_CHARGING_SOC_UPDATE_REQUEST_RT_STS_BIT	BIT(4)
+#define STEP_CHARGING_SOC_UPDATE_FAIL_RT_STS_BIT	BIT(3)
+#define STEP_CHARGING_STATE_CHANGE_RT_STS_BIT		BIT(2)
+#define CHARGING_STATE_CHANGE_RT_STS_BIT		BIT(1)
+#define CHGR_ERROR_RT_STS_BIT				BIT(0)
+
+#define STEP_CHG_SOC_VBATT_V_REG			(CHGR_BASE + 0x40)
+#define STEP_CHG_SOC_VBATT_V_MASK			GENMASK(7, 0)
+
+#define STEP_CHG_SOC_VBATT_V_UPDATE_REG			(CHGR_BASE + 0x41)
+#define STEP_CHG_SOC_VBATT_V_UPDATE_BIT			BIT(0)
+
+#define CHARGING_ENABLE_CMD_REG				(CHGR_BASE + 0x42)
+#define CHARGING_ENABLE_CMD_BIT				BIT(0)
+
+#define ALLOW_FAST_CHARGING_CMD_REG			(CHGR_BASE + 0x43)
+#define ALLOW_FAST_CHARGING_CMD_BIT			BIT(0)
+
+#define QNOVO_PT_ENABLE_CMD_REG				(CHGR_BASE + 0x44)
+#define QNOVO_PT_ENABLE_CMD_BIT				BIT(0)
+
+#define CHGR_CFG1_REG					(CHGR_BASE + 0x50)
+#define INCREASE_RCHG_TIMEOUT_CFG_BIT			BIT(1)
+#define LOAD_BAT_BIT					BIT(0)
+
+#define CHGR_CFG2_REG					(CHGR_BASE + 0x51)
+#define CHG_EN_SRC_BIT					BIT(7)
+#define CHG_EN_POLARITY_BIT				BIT(6)
+#define PRETOFAST_TRANSITION_CFG_BIT			BIT(5)
+#define BAT_OV_ECC_BIT					BIT(4)
+#define I_TERM_BIT					BIT(3)
+#define AUTO_RECHG_BIT					BIT(2)
+#define EN_ANALOG_DROP_IN_VBATT_BIT			BIT(1)
+#define CHARGER_INHIBIT_BIT				BIT(0)
+
+#define CHARGER_ENABLE_CFG_REG				(CHGR_BASE + 0x52)
+#define CHG_ENB_TIMEOUT_SETTING_BIT			BIT(1)
+#define FORCE_ZERO_CFG_BIT				BIT(0)
+
+#define CFG_REG						(CHGR_BASE + 0x53)
+#define CHG_OPTION_PIN_TRIM_BIT				BIT(7)
+#define BATN_SNS_CFG_BIT				BIT(4)
+#define CFG_TAPER_DIS_AFVC_BIT				BIT(3)
+#define BATFET_SHUTDOWN_CFG_BIT				BIT(2)
+#define VDISCHG_EN_CFG_BIT				BIT(1)
+#define VCHG_EN_CFG_BIT					BIT(0)
+
+#define CHARGER_SPARE_REG				(CHGR_BASE + 0x54)
+#define CHARGER_SPARE_MASK				GENMASK(5, 0)
+
+#define PRE_CHARGE_CURRENT_CFG_REG			(CHGR_BASE + 0x60)
+#define PRE_CHARGE_CURRENT_SETTING_MASK			GENMASK(5, 0)
+
+#define FAST_CHARGE_CURRENT_CFG_REG			(CHGR_BASE + 0x61)
+#define FAST_CHARGE_CURRENT_SETTING_MASK		GENMASK(7, 0)
+
+#define CHARGE_CURRENT_TERMINATION_CFG_REG		(CHGR_BASE + 0x62)
+#define ANALOG_CHARGE_CURRENT_TERMINATION_SETTING_MASK	GENMASK(2, 0)
+
+#define TCCC_CHARGE_CURRENT_TERMINATION_CFG_REG		(CHGR_BASE + 0x63)
+#define TCCC_CHARGE_CURRENT_TERMINATION_SETTING_MASK	GENMASK(3, 0)
+
+#define CHARGE_CURRENT_SOFTSTART_SETTING_CFG_REG	(CHGR_BASE + 0x64)
+#define CHARGE_CURRENT_SOFTSTART_SETTING_MASK		GENMASK(1, 0)
+
+#define FLOAT_VOLTAGE_CFG_REG				(CHGR_BASE + 0x70)
+#define FLOAT_VOLTAGE_SETTING_MASK			GENMASK(7, 0)
+
+#define AUTO_FLOAT_VOLTAGE_COMPENSATION_CFG_REG		(CHGR_BASE + 0x71)
+#define AUTO_FLOAT_VOLTAGE_COMPENSATION_MASK		GENMASK(2, 0)
+
+#define CHARGE_INHIBIT_THRESHOLD_CFG_REG		(CHGR_BASE + 0x72)
+#define CHARGE_INHIBIT_THRESHOLD_MASK			GENMASK(1, 0)
+#define CHARGE_INHIBIT_THRESHOLD_50MV			0
+#define CHARGE_INHIBIT_THRESHOLD_100MV			1
+#define CHARGE_INHIBIT_THRESHOLD_200MV			2
+#define CHARGE_INHIBIT_THRESHOLD_300MV			3
+
+#define RECHARGE_THRESHOLD_CFG_REG			(CHGR_BASE + 0x73)
+#define RECHARGE_THRESHOLD_MASK				GENMASK(1, 0)
+
+#define PRE_TO_FAST_CHARGE_THRESHOLD_CFG_REG		(CHGR_BASE + 0x74)
+#define PRE_TO_FAST_CHARGE_THRESHOLD_MASK		GENMASK(1, 0)
+
+#define FV_HYSTERESIS_CFG_REG				(CHGR_BASE + 0x75)
+#define FV_DROP_HYSTERESIS_CFG_MASK			GENMASK(7, 4)
+#define THRESH_HYSTERESIS_CFG_MASK			GENMASK(3, 0)
+
+#define FVC_CHARGE_INHIBIT_THRESHOLD_CFG_REG		(CHGR_BASE + 0x80)
+#define FVC_CHARGE_INHIBIT_THRESHOLD_MASK		GENMASK(5, 0)
+
+#define FVC_RECHARGE_THRESHOLD_CFG_REG			(CHGR_BASE + 0x81)
+#define FVC_RECHARGE_THRESHOLD_MASK			GENMASK(7, 0)
+
+#define FVC_PRE_TO_FAST_CHARGE_THRESHOLD_CFG_REG	(CHGR_BASE + 0x82)
+#define FVC_PRE_TO_FAST_CHARGE_THRESHOLD_MASK		GENMASK(7, 0)
+
+#define FVC_FULL_ON_THRESHOLD_CFG_REG			(CHGR_BASE + 0x83)
+#define FVC_FULL_ON_THRESHOLD_MASK			GENMASK(7, 0)
+
+#define FVC_CC_MODE_GLITCH_FILTER_SEL_CFG_REG		(CHGR_BASE + 0x84)
+#define FVC_CC_MODE_GLITCH_FILTER_SEL_MASK		GENMASK(1, 0)
+
+#define FVC_TERMINATION_GLITCH_FILTER_SEL_CFG_REG	(CHGR_BASE + 0x85)
+#define FVC_TERMINATION_GLITCH_FILTER_SEL_MASK		GENMASK(1, 0)
+
+#define JEITA_EN_CFG_REG		(CHGR_BASE + 0x90)
+#define JEITA_EN_HARDLIMIT_BIT		BIT(4)
+#define JEITA_EN_HOT_SL_FCV_BIT		BIT(3)
+#define JEITA_EN_COLD_SL_FCV_BIT	BIT(2)
+#define JEITA_EN_HOT_SL_CCC_BIT		BIT(1)
+#define JEITA_EN_COLD_SL_CCC_BIT	BIT(0)
+
+#define JEITA_FVCOMP_CFG_REG		(CHGR_BASE + 0x91)
+#define JEITA_FVCOMP_MASK		GENMASK(7, 0)
+
+#define JEITA_CCCOMP_CFG_REG		(CHGR_BASE + 0x92)
+#define JEITA_CCCOMP_MASK		GENMASK(7, 0)
+
+#define FV_CAL_CFG_REG			(CHGR_BASE + 0x76)
+#define FV_CALIBRATION_CFG_MASK		GENMASK(2, 0)
+
+#define FV_ADJUST_REG			(CHGR_BASE + 0x77)
+#define FLOAT_VOLTAGE_ADJUSTMENT_MASK	GENMASK(4, 0)
+
+#define FG_VADC_DISQ_THRESH_REG		(CHGR_BASE + 0x78)
+#define VADC_DISQUAL_THRESH_MASK	GENMASK(7, 0)
+
+#define FG_IADC_DISQ_THRESH_REG		(CHGR_BASE + 0x79)
+#define IADC_DISQUAL_THRESH_MASK	GENMASK(7, 0)
+
+#define FG_UPDATE_CFG_1_REG	(CHGR_BASE + 0x7A)
+#define BT_TMPR_TCOLD_BIT	BIT(7)
+#define BT_TMPR_COLD_BIT	BIT(6)
+#define BT_TMPR_HOT_BIT		BIT(5)
+#define BT_TMPR_THOT_BIT	BIT(4)
+#define CHG_DIE_TMPR_HOT_BIT	BIT(3)
+#define CHG_DIE_TMPR_THOT_BIT	BIT(2)
+#define SKIN_TMPR_HOT_BIT	BIT(1)
+#define SKIN_TMPR_THOT_BIT	BIT(0)
+
+#define FG_UPDATE_CFG_1_SEL_REG		(CHGR_BASE + 0x7B)
+#define BT_TMPR_TCOLD_SEL_BIT		BIT(7)
+#define BT_TMPR_COLD_SEL_BIT		BIT(6)
+#define BT_TMPR_HOT_SEL_BIT		BIT(5)
+#define BT_TMPR_THOT_SEL_BIT		BIT(4)
+#define CHG_DIE_TMPR_HOT_SEL_BIT	BIT(3)
+#define CHG_DIE_TMPR_THOT_SEL_BIT	BIT(2)
+#define SKIN_TMPR_HOT_SEL_BIT		BIT(1)
+#define SKIN_TMPR_THOT_SEL_BIT		BIT(0)
+
+#define FG_UPDATE_CFG_2_REG		(CHGR_BASE + 0x7C)
+#define SOC_LT_OTG_THRESH_BIT		BIT(3)
+#define SOC_LT_CHG_RECHARGE_THRESH_BIT	BIT(2)
+#define VBT_LT_CHG_RECHARGE_THRESH_BIT	BIT(1)
+#define IBT_LT_CHG_TERM_THRESH_BIT	BIT(0)
+
+#define FG_UPDATE_CFG_2_SEL_REG			(CHGR_BASE + 0x7D)
+#define SOC_LT_OTG_THRESH_SEL_BIT		BIT(3)
+#define SOC_LT_CHG_RECHARGE_THRESH_SEL_BIT	BIT(2)
+#define VBT_LT_CHG_RECHARGE_THRESH_SEL_BIT	BIT(1)
+#define IBT_LT_CHG_TERM_THRESH_SEL_BIT		BIT(0)
+
+#define FG_CHG_INTERFACE_CFG_REG	(CHGR_BASE + 0x7E)
+#define ESR_ISINK_CFG_MASK		GENMASK(7, 6)
+#define ESR_FASTCHG_DECR_CFG_MASK	GENMASK(5, 4)
+#define FG_CHARGER_INHIBIT_BIT		BIT(3)
+#define FG_BATFET_BIT			BIT(2)
+#define IADC_SYNC_CNV_BIT		BIT(1)
+#define VADC_SYNC_CNV_BIT		BIT(0)
+
+#define FG_CHG_INTERFACE_CFG_SEL_REG	(CHGR_BASE + 0x7F)
+#define ESR_ISINK_CFG_SEL_BIT		BIT(5)
+#define ESR_FASTCHG_DECR_CFG_SEL_BIT	BIT(4)
+#define FG_CHARGER_INHIBIT_SEL_BIT	BIT(3)
+#define FG_BATFET_SEL_BIT		BIT(2)
+#define IADC_SYNC_CNV_SEL_BIT		BIT(1)
+#define VADC_SYNC_CNV_SEL_BIT		BIT(0)
+
+#define CHGR_STEP_CHG_MODE_CFG_REG		(CHGR_BASE + 0xB0)
+#define STEP_CHARGING_SOC_FAIL_OPTION_BIT	BIT(3)
+#define STEP_CHARGING_MODE_SELECT_BIT		BIT(2)
+#define STEP_CHARGING_SOURCE_SELECT_BIT		BIT(1)
+#define STEP_CHARGING_ENABLE_BIT		BIT(0)
+
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG		(CHGR_BASE + 0xB1)
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_MASK	GENMASK(1, 0)
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_5S		0
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_10S		1
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_20S		2
+#define STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S		3
+
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG		(CHGR_BASE + 0xB2)
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_MASK		GENMASK(1, 0)
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_10S		0
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_30S		1
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_60S		2
+#define STEP_CHG_UPDATE_FAIL_TIMEOUT_120S		3
+
+#define STEP_CHG_SOC_OR_BATT_V_TH1_REG	(CHGR_BASE + 0xB3)
+#define STEP_CHG_SOC_OR_BATT_V_TH2_REG	(CHGR_BASE + 0xB4)
+#define STEP_CHG_SOC_OR_BATT_V_TH3_REG	(CHGR_BASE + 0xB5)
+#define STEP_CHG_SOC_OR_BATT_V_TH4_REG	(CHGR_BASE + 0xB6)
+#define STEP_CHG_CURRENT_DELTA1_REG	(CHGR_BASE + 0xB7)
+#define STEP_CHG_CURRENT_DELTA2_REG	(CHGR_BASE + 0xB8)
+#define STEP_CHG_CURRENT_DELTA3_REG	(CHGR_BASE + 0xB9)
+#define STEP_CHG_CURRENT_DELTA4_REG	(CHGR_BASE + 0xBA)
+#define STEP_CHG_CURRENT_DELTA5_REG	(CHGR_BASE + 0xBB)
+
+/* OTG Peripheral Registers */
+#define RID_CC_CONTROL_23_16_REG	(OTG_BASE + 0x06)
+#define RID_CC_CONTROL_23_BIT		BIT(7)
+#define VCONN_SOFTSTART_EN_BIT		BIT(6)
+#define VCONN_SFTST_CFG_MASK		GENMASK(5, 4)
+#define CONNECT_RIDCC_SENSOR_TO_CC_MASK	GENMASK(3, 2)
+#define EN_CC_1P1CLAMP_BIT		BIT(1)
+#define ENABLE_CRUDESEN_CC_1_BIT	BIT(0)
+
+#define RID_CC_CONTROL_15_8_REG		(OTG_BASE + 0x07)
+#define ENABLE_CRUDESEN_CC_0_BIT	BIT(7)
+#define EN_FMB_2P5UA_CC_MASK		GENMASK(6, 5)
+#define EN_ISRC_180UA_BIT		BIT(4)
+#define ENABLE_CURRENTSOURCE_CC_MASK	GENMASK(3, 2)
+#define EN_BANDGAP_RID_C_DET_BIT	BIT(1)
+#define ENABLE_RD_CC_1_BIT		BIT(0)
+
+#define RID_CC_CONTROL_7_0_REG		(OTG_BASE + 0x08)
+#define ENABLE_RD_CC_0_BIT		BIT(7)
+#define VCONN_ILIM500MA_BIT		BIT(6)
+#define EN_MICRO_USB_MODE_BIT		BIT(5)
+#define UFP_DFP_MODE_BIT		BIT(4)
+#define VCONN_EN_CC_MASK		GENMASK(3, 2)
+#define VREF_SEL_RIDCC_SENSOR_MASK	GENMASK(1, 0)
+
+#define OTG_STATUS_REG			(OTG_BASE + 0x09)
+#define BOOST_SOFTSTART_DONE_BIT	BIT(3)
+#define OTG_STATE_MASK			GENMASK(2, 0)
+#define OTG_STATE_ENABLED		0x2
+
+/* OTG Interrupt Bits */
+#define TESTMODE_CHANGE_DETECT_RT_STS_BIT	BIT(3)
+#define OTG_OC_DIS_SW_STS_RT_STS_BIT		BIT(2)
+#define OTG_OVERCURRENT_RT_STS_BIT		BIT(1)
+#define OTG_FAIL_RT_STS_BIT			BIT(0)
+
+#define CMD_OTG_REG			(OTG_BASE + 0x40)
+#define OTG_EN_BIT			BIT(0)
+
+#define BAT_UVLO_THRESHOLD_CFG_REG	(OTG_BASE + 0x51)
+#define BAT_UVLO_THRESHOLD_MASK		GENMASK(1, 0)
+
+#define OTG_CURRENT_LIMIT_CFG_REG	(OTG_BASE + 0x52)
+#define OTG_CURRENT_LIMIT_MASK		GENMASK(2, 0)
+
+#define OTG_CFG_REG			(OTG_BASE + 0x53)
+#define OTG_RESERVED_MASK		GENMASK(7, 6)
+#define DIS_OTG_ON_TLIM_BIT		BIT(5)
+#define QUICKSTART_OTG_FASTROLESWAP_BIT	BIT(4)
+#define INCREASE_DFP_TIME_BIT		BIT(3)
+#define ENABLE_OTG_IN_DEBUG_MODE_BIT	BIT(2)
+#define OTG_EN_SRC_CFG_BIT		BIT(1)
+#define CONCURRENT_MODE_CFG_BIT		BIT(0)
+
+#define OTG_ENG_OTG_CFG_REG		(OTG_BASE + 0xC0)
+#define ENG_BUCKBOOST_HALT1_8_MODE_BIT	BIT(0)
+
+/* BATIF Peripheral Registers */
+/* BATIF Interrupt Bits */
+#define BAT_7_RT_STS_BIT			BIT(7)
+#define BAT_6_RT_STS_BIT			BIT(6)
+#define BAT_TERMINAL_MISSING_RT_STS_BIT		BIT(5)
+#define BAT_THERM_OR_ID_MISSING_RT_STS_BIT	BIT(4)
+#define BAT_LOW_RT_STS_BIT			BIT(3)
+#define BAT_OV_RT_STS_BIT			BIT(2)
+#define BAT_OCP_RT_STS_BIT			BIT(1)
+#define BAT_TEMP_RT_STS_BIT			BIT(0)
+
+#define SHIP_MODE_REG			(BATIF_BASE + 0x40)
+#define SHIP_MODE_EN_BIT		BIT(0)
+
+#define BATOCP_THRESHOLD_CFG_REG	(BATIF_BASE + 0x50)
+#define BATOCP_ENABLE_CFG_BIT		BIT(3)
+#define BATOCP_THRESHOLD_MASK		GENMASK(2, 0)
+
+#define BATOCP_INTRPT_DELAY_TMR_CFG_REG	(BATIF_BASE + 0x51)
+#define BATOCP_INTRPT_TIMEOUT_MASK	GENMASK(5, 3)
+#define BATOCP_DELAY_TIMEOUT_MASK	GENMASK(2, 0)
+
+#define BATOCP_RESET_TMR_CFG_REG	(BATIF_BASE + 0x52)
+#define EN_BATOCP_RESET_TMR_BIT		BIT(3)
+#define BATOCP_RESET_TIMEOUT_MASK	GENMASK(2, 0)
+
+#define LOW_BATT_DETECT_EN_CFG_REG	(BATIF_BASE + 0x60)
+#define LOW_BATT_DETECT_EN_BIT		BIT(0)
+
+#define LOW_BATT_THRESHOLD_CFG_REG	(BATIF_BASE + 0x61)
+#define LOW_BATT_THRESHOLD_MASK		GENMASK(3, 0)
+
+#define BAT_FET_CFG_REG			(BATIF_BASE + 0x62)
+#define BAT_FET_CFG_BIT			BIT(0)
+
+#define BAT_MISS_SRC_CFG_REG		(BATIF_BASE + 0x70)
+#define BAT_MISS_ALG_EN_BIT		BIT(2)
+#define BAT_MISS_RESERVED_BIT		BIT(1)
+#define BAT_MISS_PIN_SRC_EN_BIT		BIT(0)
+
+#define BAT_MISS_ALG_OPTIONS_CFG_REG	(BATIF_BASE + 0x71)
+#define BAT_MISS_INPUT_PLUGIN_BIT	BIT(2)
+#define BAT_MISS_TMR_START_OPTION_BIT	BIT(1)
+#define BAT_MISS_POLL_EN_BIT		BIT(0)
+
+#define BAT_MISS_PIN_GF_CFG_REG		(BATIF_BASE + 0x72)
+#define BAT_MISS_PIN_GF_MASK		GENMASK(1, 0)
+
+/* USBIN Peripheral Registers */
+#define USBIN_INPUT_STATUS_REG		(USBIN_BASE + 0x06)
+#define USBIN_INPUT_STATUS_7_BIT	BIT(7)
+#define USBIN_INPUT_STATUS_6_BIT	BIT(6)
+#define USBIN_12V_BIT			BIT(5)
+#define USBIN_9V_TO_12V_BIT		BIT(4)
+#define USBIN_9V_BIT			BIT(3)
+#define USBIN_5V_TO_12V_BIT		BIT(2)
+#define USBIN_5V_TO_9V_BIT		BIT(1)
+#define USBIN_5V_BIT			BIT(0)
+#define QC_2P0_STATUS_MASK		GENMASK(2, 0)
+
+#define APSD_STATUS_REG			(USBIN_BASE + 0x07)
+#define APSD_STATUS_7_BIT		BIT(7)
+#define HVDCP_CHECK_TIMEOUT_BIT		BIT(6)
+#define SLOW_PLUGIN_TIMEOUT_BIT		BIT(5)
+#define ENUMERATION_DONE_BIT		BIT(4)
+#define VADP_CHANGE_DONE_AFTER_AUTH_BIT	BIT(3)
+#define QC_AUTH_DONE_STATUS_BIT		BIT(2)
+#define QC_CHARGER_BIT			BIT(1)
+#define APSD_DTC_STATUS_DONE_BIT	BIT(0)
+
+#define APSD_RESULT_STATUS_REG		(USBIN_BASE + 0x08)
+#define ICL_OVERRIDE_LATCH_BIT		BIT(7)
+#define APSD_RESULT_STATUS_MASK		GENMASK(6, 0)
+#define QC_3P0_BIT			BIT(6)
+#define QC_2P0_BIT			BIT(5)
+#define FLOAT_CHARGER_BIT		BIT(4)
+#define DCP_CHARGER_BIT			BIT(3)
+#define CDP_CHARGER_BIT			BIT(2)
+#define OCP_CHARGER_BIT			BIT(1)
+#define SDP_CHARGER_BIT			BIT(0)
+
+#define QC_CHANGE_STATUS_REG		(USBIN_BASE + 0x09)
+#define QC_CHANGE_STATUS_7_BIT		BIT(7)
+#define QC_CHANGE_STATUS_6_BIT		BIT(6)
+#define QC_9V_TO_12V_REASON_BIT		BIT(5)
+#define QC_5V_TO_9V_REASON_BIT		BIT(4)
+#define QC_CONTINUOUS_BIT		BIT(3)
+#define QC_12V_BIT			BIT(2)
+#define QC_9V_BIT			BIT(1)
+#define QC_5V_BIT			BIT(0)
+
+#define QC_PULSE_COUNT_STATUS_REG		(USBIN_BASE + 0x0A)
+#define QC_PULSE_COUNT_STATUS_7_BIT		BIT(7)
+#define QC_PULSE_COUNT_STATUS_6_BIT		BIT(6)
+#define QC_PULSE_COUNT_MASK			GENMASK(5, 0)
+
+#define TYPE_C_STATUS_1_REG			(USBIN_BASE + 0x0B)
+#define UFP_TYPEC_MASK				GENMASK(7, 5)
+#define UFP_TYPEC_RDSTD_BIT			BIT(7)
+#define UFP_TYPEC_RD1P5_BIT			BIT(6)
+#define UFP_TYPEC_RD3P0_BIT			BIT(5)
+#define UFP_TYPEC_FMB_255K_BIT			BIT(4)
+#define UFP_TYPEC_FMB_301K_BIT			BIT(3)
+#define UFP_TYPEC_FMB_523K_BIT			BIT(2)
+#define UFP_TYPEC_FMB_619K_BIT			BIT(1)
+#define UFP_TYPEC_OPEN_OPEN_BIT			BIT(0)
+
+#define TYPE_C_STATUS_2_REG			(USBIN_BASE + 0x0C)
+#define DFP_TYPEC_MASK				0x8F
+#define DFP_RA_OPEN_BIT				BIT(7)
+#define TIMER_STAGE_BIT				BIT(6)
+#define EXIT_UFP_MODE_BIT			BIT(5)
+#define EXIT_DFP_MODE_BIT			BIT(4)
+#define DFP_RD_OPEN_BIT				BIT(3)
+#define DFP_RD_RA_VCONN_BIT			BIT(2)
+#define DFP_RD_RD_BIT				BIT(1)
+#define DFP_RA_RA_BIT				BIT(0)
+
+#define TYPE_C_STATUS_3_REG			(USBIN_BASE + 0x0D)
+#define ENABLE_BANDGAP_BIT			BIT(7)
+#define U_USB_GND_NOVBUS_BIT			BIT(6)
+#define U_USB_FLOAT_NOVBUS_BIT			BIT(5)
+#define U_USB_GND_BIT				BIT(4)
+#define U_USB_FMB1_BIT				BIT(3)
+#define U_USB_FLOAT1_BIT			BIT(2)
+#define U_USB_FMB2_BIT				BIT(1)
+#define U_USB_FLOAT2_BIT			BIT(0)
+
+#define TYPE_C_STATUS_4_REG			(USBIN_BASE + 0x0E)
+#define UFP_DFP_MODE_STATUS_BIT			BIT(7)
+#define TYPEC_VBUS_STATUS_BIT			BIT(6)
+#define TYPEC_VBUS_ERROR_STATUS_BIT		BIT(5)
+#define TYPEC_DEBOUNCE_DONE_STATUS_BIT		BIT(4)
+#define TYPEC_UFP_AUDIO_ADAPT_STATUS_BIT	BIT(3)
+#define TYPEC_VCONN_OVERCURR_STATUS_BIT		BIT(2)
+#define CC_ORIENTATION_BIT			BIT(1)
+#define CC_ATTACHED_BIT				BIT(0)
+
+#define TYPE_C_STATUS_5_REG			(USBIN_BASE + 0x0F)
+#define TRY_SOURCE_FAILED_BIT			BIT(6)
+#define TRY_SINK_FAILED_BIT			BIT(5)
+#define TIMER_STAGE_2_BIT			BIT(4)
+#define TYPEC_LEGACY_CABLE_STATUS_BIT		BIT(3)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT	BIT(2)
+#define TYPEC_TRYSOURCE_DETECT_STATUS_BIT	BIT(1)
+#define TYPEC_TRYSINK_DETECT_STATUS_BIT		BIT(0)
+
+/* USBIN Interrupt Bits */
+#define TYPE_C_CHANGE_RT_STS_BIT		BIT(7)
+#define USBIN_ICL_CHANGE_RT_STS_BIT		BIT(6)
+#define USBIN_SOURCE_CHANGE_RT_STS_BIT		BIT(5)
+#define USBIN_PLUGIN_RT_STS_BIT			BIT(4)
+#define USBIN_OV_RT_STS_BIT			BIT(3)
+#define USBIN_UV_RT_STS_BIT			BIT(2)
+#define USBIN_LT_3P6V_RT_STS_BIT		BIT(1)
+#define USBIN_COLLAPSE_RT_STS_BIT		BIT(0)
+
+#define QC_PULSE_COUNT_STATUS_1_REG		(USBIN_BASE + 0x30)
+
+#define USBIN_CMD_IL_REG			(USBIN_BASE + 0x40)
+#define BAT_2_SYS_FET_DIS_BIT			BIT(1)
+#define USBIN_SUSPEND_BIT			BIT(0)
+
+#define CMD_APSD_REG				(USBIN_BASE + 0x41)
+#define ICL_OVERRIDE_BIT			BIT(1)
+#define APSD_RERUN_BIT				BIT(0)
+
+#define CMD_HVDCP_2_REG				(USBIN_BASE + 0x43)
+#define RESTART_AICL_BIT			BIT(7)
+#define TRIGGER_AICL_BIT			BIT(6)
+#define FORCE_12V_BIT				BIT(5)
+#define FORCE_9V_BIT				BIT(4)
+#define FORCE_5V_BIT				BIT(3)
+#define IDLE_BIT				BIT(2)
+#define SINGLE_DECREMENT_BIT			BIT(1)
+#define SINGLE_INCREMENT_BIT			BIT(0)
+
+#define USB_MISC2_REG				(USBIN_BASE + 0x57)
+#define USB_MISC2_MASK				GENMASK(1, 0)
+
+#define TYPE_C_CFG_REG				(USBIN_BASE + 0x58)
+#define APSD_START_ON_CC_BIT			BIT(7)
+#define WAIT_FOR_APSD_BIT			BIT(6)
+#define FACTORY_MODE_DETECTION_EN_BIT		BIT(5)
+#define FACTORY_MODE_ICL_3A_4A_BIT		BIT(4)
+#define FACTORY_MODE_DIS_CHGING_CFG_BIT		BIT(3)
+#define SUSPEND_NON_COMPLIANT_CFG_BIT		BIT(2)
+#define VCONN_OC_CFG_BIT			BIT(1)
+#define TYPE_C_OR_U_USB_BIT			BIT(0)
+
+#define TYPE_C_CFG_2_REG			(USBIN_BASE + 0x59)
+#define TYPE_C_DFP_CURRSRC_MODE_BIT		BIT(7)
+#define VCONN_ILIM500MA_CFG_BIT			BIT(6)
+#define VCONN_SOFTSTART_CFG_MASK		GENMASK(5, 4)
+#define EN_TRY_SOURCE_MODE_BIT			BIT(3)
+#define USB_FACTORY_MODE_ENABLE_BIT		BIT(2)
+#define TYPE_C_UFP_MODE_BIT			BIT(1)
+#define EN_80UA_180UA_CUR_SOURCE_BIT		BIT(0)
+
+#define TYPE_C_CFG_3_REG			(USBIN_BASE + 0x5A)
+#define TVBUS_DEBOUNCE_BIT			BIT(7)
+#define TYPEC_LEGACY_CABLE_INT_EN_BIT		BIT(6)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT		BIT(5)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN_BIT	BIT(4)
+#define TYPEC_TRYSINK_DETECT_INT_EN_BIT		BIT(3)
+#define EN_TRYSINK_MODE_BIT			BIT(2)
+#define EN_LEGACY_CABLE_DETECTION_BIT		BIT(1)
+#define ALLOW_PD_DRING_UFP_TCCDB_BIT		BIT(0)
+
+#define USBIN_ADAPTER_ALLOW_CFG_REG		(USBIN_BASE + 0x60)
+#define USBIN_ADAPTER_ALLOW_MASK		GENMASK(3, 0)
+enum {
+	USBIN_ADAPTER_ALLOW_5V		= 0,
+	USBIN_ADAPTER_ALLOW_9V		= 2,
+	USBIN_ADAPTER_ALLOW_5V_OR_9V	= 3,
+	USBIN_ADAPTER_ALLOW_12V		= 4,
+	USBIN_ADAPTER_ALLOW_5V_OR_12V	= 5,
+	USBIN_ADAPTER_ALLOW_9V_TO_12V	= 6,
+	USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V = 7,
+	USBIN_ADAPTER_ALLOW_5V_TO_9V	= 8,
+	USBIN_ADAPTER_ALLOW_5V_TO_12V	= 12,
+};
+
+#define USBIN_OPTIONS_1_CFG_REG			(USBIN_BASE + 0x62)
+#define CABLE_R_SEL_BIT				BIT(7)
+#define HVDCP_AUTH_ALG_EN_CFG_BIT		BIT(6)
+#define HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT	BIT(5)
+#define INPUT_PRIORITY_BIT			BIT(4)
+#define AUTO_SRC_DETECT_BIT			BIT(3)
+#define HVDCP_EN_BIT				BIT(2)
+#define VADP_INCREMENT_VOLTAGE_LIMIT_BIT	BIT(1)
+#define VADP_TAPER_TIMER_EN_BIT			BIT(0)
+
+#define USBIN_OPTIONS_2_CFG_REG			(USBIN_BASE + 0x63)
+#define WIPWR_RST_EUD_CFG_BIT			BIT(7)
+#define SWITCHER_START_CFG_BIT			BIT(6)
+#define DCD_TIMEOUT_SEL_BIT			BIT(5)
+#define OCD_CURRENT_SEL_BIT			BIT(4)
+#define SLOW_PLUGIN_TIMER_EN_CFG_BIT		BIT(3)
+#define FLOAT_OPTIONS_MASK			GENMASK(2, 0)
+#define FLOAT_DIS_CHGING_CFG_BIT		BIT(2)
+#define SUSPEND_FLOAT_CFG_BIT			BIT(1)
+#define FORCE_FLOAT_SDP_CFG_BIT			BIT(0)
+
+#define TAPER_TIMER_SEL_CFG_REG			(USBIN_BASE + 0x64)
+#define TYPEC_SPARE_CFG_BIT			BIT(7)
+#define TAPER_TIMER_SEL_MASK			GENMASK(1, 0)
+
+#define USBIN_LOAD_CFG_REG			(USBIN_BASE + 0x65)
+#define USBIN_OV_CH_LOAD_OPTION_BIT		BIT(7)
+
+#define USBIN_ICL_OPTIONS_REG			(USBIN_BASE + 0x66)
+#define CFG_USB3P0_SEL_BIT			BIT(2)
+#define USB51_MODE_BIT				BIT(1)
+#define USBIN_MODE_CHG_BIT			BIT(0)
+
+#define TYPE_C_INTRPT_ENB_REG			(USBIN_BASE + 0x67)
+#define TYPEC_CCOUT_DETACH_INT_EN_BIT		BIT(7)
+#define TYPEC_CCOUT_ATTACH_INT_EN_BIT		BIT(6)
+#define TYPEC_VBUS_ERROR_INT_EN_BIT		BIT(5)
+#define TYPEC_UFP_AUDIOADAPT_INT_EN_BIT		BIT(4)
+#define TYPEC_DEBOUNCE_DONE_INT_EN_BIT		BIT(3)
+#define TYPEC_CCSTATE_CHANGE_INT_EN_BIT		BIT(2)
+#define TYPEC_VBUS_DEASSERT_INT_EN_BIT		BIT(1)
+#define TYPEC_VBUS_ASSERT_INT_EN_BIT		BIT(0)
+
+#define TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG	(USBIN_BASE + 0x68)
+#define EXIT_SNK_BASED_ON_CC_BIT		BIT(7)
+#define VCONN_EN_ORIENTATION_BIT		BIT(6)
+#define TYPEC_VCONN_OVERCURR_INT_EN_BIT		BIT(5)
+#define VCONN_EN_SRC_BIT			BIT(4)
+#define VCONN_EN_VALUE_BIT			BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK		GENMASK(2, 0)
+#define UFP_EN_CMD_BIT				BIT(2)
+#define DFP_EN_CMD_BIT				BIT(1)
+#define TYPEC_DISABLE_CMD_BIT			BIT(0)
+
+#define USBIN_SOURCE_CHANGE_INTRPT_ENB_REG	(USBIN_BASE + 0x69)
+#define SLOW_IRQ_EN_CFG_BIT			BIT(5)
+#define ENUMERATION_IRQ_EN_CFG_BIT		BIT(4)
+#define VADP_IRQ_EN_CFG_BIT			BIT(3)
+#define AUTH_IRQ_EN_CFG_BIT			BIT(2)
+#define HVDCP_IRQ_EN_CFG_BIT			BIT(1)
+#define APSD_IRQ_EN_CFG_BIT			BIT(0)
+
+#define USBIN_CURRENT_LIMIT_CFG_REG		(USBIN_BASE + 0x70)
+#define USBIN_CURRENT_LIMIT_MASK		GENMASK(7, 0)
+
+#define USBIN_AICL_OPTIONS_CFG_REG		(USBIN_BASE + 0x80)
+#define SUSPEND_ON_COLLAPSE_USBIN_BIT		BIT(7)
+#define USBIN_AICL_HDC_EN_BIT			BIT(6)
+#define USBIN_AICL_START_AT_MAX_BIT		BIT(5)
+#define USBIN_AICL_RERUN_EN_BIT			BIT(4)
+#define USBIN_AICL_ADC_EN_BIT			BIT(3)
+#define USBIN_AICL_EN_BIT			BIT(2)
+#define USBIN_HV_COLLAPSE_RESPONSE_BIT		BIT(1)
+#define USBIN_LV_COLLAPSE_RESPONSE_BIT		BIT(0)
+
+#define USBIN_5V_AICL_THRESHOLD_CFG_REG		(USBIN_BASE + 0x81)
+#define USBIN_5V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_9V_AICL_THRESHOLD_CFG_REG		(USBIN_BASE + 0x82)
+#define USBIN_9V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_12V_AICL_THRESHOLD_CFG_REG	(USBIN_BASE + 0x83)
+#define USBIN_12V_AICL_THRESHOLD_CFG_MASK	GENMASK(2, 0)
+
+#define USBIN_CONT_AICL_THRESHOLD_CFG_REG	(USBIN_BASE + 0x84)
+#define USBIN_CONT_AICL_THRESHOLD_CFG_MASK	GENMASK(5, 0)
+
+/* DCIN Peripheral Registers */
+#define DCIN_INPUT_STATUS_REG		(DCIN_BASE + 0x06)
+#define DCIN_INPUT_STATUS_7_BIT		BIT(7)
+#define DCIN_INPUT_STATUS_6_BIT		BIT(6)
+#define DCIN_12V_BIT			BIT(5)
+#define DCIN_9V_TO_12V_BIT		BIT(4)
+#define DCIN_9V_BIT			BIT(3)
+#define DCIN_5V_TO_12V_BIT		BIT(2)
+#define DCIN_5V_TO_9V_BIT		BIT(1)
+#define DCIN_5V_BIT			BIT(0)
+
+#define WIPWR_STATUS_REG		(DCIN_BASE + 0x07)
+#define WIPWR_STATUS_7_BIT		BIT(7)
+#define WIPWR_STATUS_6_BIT		BIT(6)
+#define WIPWR_STATUS_5_BIT		BIT(5)
+#define DCIN_WIPWR_OV_DG_BIT		BIT(4)
+#define DIV2_EN_DG_BIT			BIT(3)
+#define SHUTDOWN_N_LATCH_BIT		BIT(2)
+#define CHG_OK_PIN_BIT			BIT(1)
+#define WIPWR_CHARGING_ENABLED_BIT	BIT(0)
+
+#define WIPWR_RANGE_STATUS_REG		(DCIN_BASE + 0x08)
+#define WIPWR_RANGE_STATUS_MASK		GENMASK(4, 0)
+
+/* DCIN Interrupt Bits */
+#define WIPWR_VOLTAGE_RANGE_RT_STS_BIT	BIT(7)
+#define DCIN_ICL_CHANGE_RT_STS_BIT	BIT(6)
+#define DIV2_EN_DG_RT_STS_BIT		BIT(5)
+#define DCIN_PLUGIN_RT_STS_BIT		BIT(4)
+#define DCIN_OV_RT_STS_BIT		BIT(3)
+#define DCIN_UV_RT_STS_BIT		BIT(2)
+#define DCIN_LT_3P6V_RT_STS_BIT		BIT(1)
+#define DCIN_COLLAPSE_RT_STS_BIT	BIT(0)
+
+#define DCIN_CMD_IL_REG				(DCIN_BASE + 0x40)
+#define WIRELESS_CHG_DIS_BIT			BIT(3)
+#define SHDN_N_CLEAR_CMD_BIT			BIT(2)
+#define SHDN_N_SET_CMD_BIT			BIT(1)
+#define DCIN_SUSPEND_BIT			BIT(0)
+
+#define DC_SPARE_REG				(DCIN_BASE + 0x58)
+#define DC_SPARE_MASK				GENMASK(3, 0)
+
+#define DCIN_ADAPTER_ALLOW_CFG_REG		(DCIN_BASE + 0x60)
+#define DCIN_ADAPTER_ALLOW_MASK			GENMASK(3, 0)
+
+#define DCIN_LOAD_CFG_REG			(DCIN_BASE + 0x65)
+#define DCIN_OV_CH_LOAD_OPTION_BIT		BIT(7)
+
+#define DCIN_CURRENT_LIMIT_CFG_REG		(DCIN_BASE + 0x70)
+#define DCIN_CURRENT_LIMIT_MASK			GENMASK(7, 0)
+
+#define DCIN_AICL_OPTIONS_CFG_REG		(DCIN_BASE + 0x80)
+#define SUSPEND_ON_COLLAPSE_DCIN_BIT		BIT(7)
+#define DCIN_AICL_HDC_EN_BIT			BIT(6)
+#define DCIN_AICL_START_AT_MAX_BIT		BIT(5)
+#define DCIN_AICL_RERUN_EN_BIT			BIT(4)
+#define DCIN_AICL_ADC_EN_BIT			BIT(3)
+#define DCIN_AICL_EN_BIT			BIT(2)
+#define DCIN_HV_COLLAPSE_RESPONSE_BIT		BIT(1)
+#define DCIN_LV_COLLAPSE_RESPONSE_BIT		BIT(0)
+
+#define DCIN_AICL_REF_SEL_CFG_REG		(DCIN_BASE + 0x81)
+#define DCIN_CONT_AICL_THRESHOLD_CFG_MASK	GENMASK(5, 0)
+
+#define DCIN_ICL_START_CFG_REG			(DCIN_BASE + 0x82)
+#define DCIN_ICL_START_CFG_BIT			BIT(0)
+
+#define DIV2_EN_GF_TIME_CFG_REG			(DCIN_BASE + 0x90)
+#define DIV2_EN_GF_TIME_CFG_MASK		GENMASK(1, 0)
+
+#define WIPWR_IRQ_TMR_CFG_REG			(DCIN_BASE + 0x91)
+#define WIPWR_IRQ_TMR_MASK			GENMASK(2, 0)
+
+#define ZIN_ICL_PT_REG				(DCIN_BASE + 0x92)
+#define ZIN_ICL_PT_MASK				GENMASK(7, 0)
+
+#define ZIN_ICL_LV_REG				(DCIN_BASE + 0x93)
+#define ZIN_ICL_LV_MASK				GENMASK(7, 0)
+
+#define ZIN_ICL_HV_REG				(DCIN_BASE + 0x94)
+#define ZIN_ICL_HV_MASK				GENMASK(7, 0)
+
+#define WI_PWR_OPTIONS_REG			(DCIN_BASE + 0x95)
+#define CHG_OK_BIT				BIT(7)
+#define WIPWR_UVLO_IRQ_OPT_BIT			BIT(6)
+#define BUCK_HOLDOFF_ENABLE_BIT			BIT(5)
+#define CHG_OK_HW_SW_SELECT_BIT			BIT(4)
+#define WIPWR_RST_ENABLE_BIT			BIT(3)
+#define DCIN_WIPWR_IRQ_SELECT_BIT		BIT(2)
+#define AICL_SWITCH_ENABLE_BIT			BIT(1)
+#define ZIN_ICL_ENABLE_BIT			BIT(0)
+
+#define ZIN_ICL_PT_HV_REG			(DCIN_BASE + 0x96)
+#define ZIN_ICL_PT_HV_MASK			GENMASK(7, 0)
+
+#define ZIN_ICL_MID_LV_REG			(DCIN_BASE + 0x97)
+#define ZIN_ICL_MID_LV_MASK			GENMASK(7, 0)
+
+#define ZIN_ICL_MID_HV_REG			(DCIN_BASE + 0x98)
+#define ZIN_ICL_MID_HV_MASK			GENMASK(7, 0)
+
+enum {
+	ZIN_ICL_PT_MAX_MV = 8000,
+	ZIN_ICL_PT_HV_MAX_MV = 9000,
+	ZIN_ICL_LV_MAX_MV = 5500,
+	ZIN_ICL_MID_LV_MAX_MV = 6500,
+	ZIN_ICL_MID_HV_MAX_MV = 8000,
+	ZIN_ICL_HV_MAX_MV = 11000,
+};
+
+#define DC_ENG_SSUPPLY_CFG2_REG			(DCIN_BASE + 0xC1)
+#define ENG_SSUPPLY_IVREF_OTG_SS_MASK		GENMASK(2, 0)
+#define OTG_SS_SLOW				0x3
+
+#define DC_ENG_SSUPPLY_CFG3_REG			(DCIN_BASE + 0xC2)
+#define ENG_SSUPPLY_HI_CAP_BIT			BIT(6)
+#define ENG_SSUPPLY_HI_RES_BIT			BIT(5)
+#define ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT	BIT(3)
+#define ENG_SSUPPLY_CFG_SYSOV_TH_4P8_BIT	BIT(2)
+#define ENG_SSUPPLY_5V_OV_OPT_BIT		BIT(0)
+
+/* MISC Peripheral Registers */
+#define REVISION1_REG				(MISC_BASE + 0x00)
+#define DIG_MINOR_MASK				GENMASK(7, 0)
+
+#define REVISION2_REG				(MISC_BASE + 0x01)
+#define DIG_MAJOR_MASK				GENMASK(7, 0)
+
+#define REVISION3_REG				(MISC_BASE + 0x02)
+#define ANA_MINOR_MASK				GENMASK(7, 0)
+
+#define REVISION4_REG				(MISC_BASE + 0x03)
+#define ANA_MAJOR_MASK				GENMASK(7, 0)
+
+#define TEMP_RANGE_STATUS_REG			(MISC_BASE + 0x06)
+#define TEMP_RANGE_STATUS_7_BIT			BIT(7)
+#define THERM_REG_ACTIVE_BIT			BIT(6)
+#define TLIM_BIT				BIT(5)
+#define TEMP_RANGE_MASK				GENMASK(4, 1)
+#define ALERT_LEVEL_BIT				BIT(4)
+#define TEMP_ABOVE_RANGE_BIT			BIT(3)
+#define TEMP_WITHIN_RANGE_BIT			BIT(2)
+#define TEMP_BELOW_RANGE_BIT			BIT(1)
+#define THERMREG_DISABLED_BIT			BIT(0)
+
+#define ICL_STATUS_REG				(MISC_BASE + 0x07)
+#define INPUT_CURRENT_LIMIT_MASK		GENMASK(7, 0)
+
+#define ADAPTER_5V_ICL_STATUS_REG		(MISC_BASE + 0x08)
+#define ADAPTER_5V_ICL_MASK			GENMASK(7, 0)
+
+#define ADAPTER_9V_ICL_STATUS_REG		(MISC_BASE + 0x09)
+#define ADAPTER_9V_ICL_MASK			GENMASK(7, 0)
+
+#define AICL_STATUS_REG				(MISC_BASE + 0x0A)
+#define AICL_STATUS_7_BIT			BIT(7)
+#define SOFT_ILIMIT_BIT				BIT(6)
+#define HIGHEST_DC_BIT				BIT(5)
+#define USBIN_CH_COLLAPSE_BIT			BIT(4)
+#define DCIN_CH_COLLAPSE_BIT			BIT(3)
+#define ICL_IMIN_BIT				BIT(2)
+#define AICL_FAIL_BIT				BIT(1)
+#define AICL_DONE_BIT				BIT(0)
+
+#define POWER_PATH_STATUS_REG			(MISC_BASE + 0x0B)
+#define INPUT_SS_DONE_BIT			BIT(7)
+#define USBIN_SUSPEND_STS_BIT			BIT(6)
+#define DCIN_SUSPEND_STS_BIT			BIT(5)
+#define USE_USBIN_BIT				BIT(4)
+#define USE_DCIN_BIT				BIT(3)
+#define POWER_PATH_MASK				GENMASK(2, 1)
+#define VALID_INPUT_POWER_SOURCE_STS_BIT	BIT(0)
+
+#define WDOG_STATUS_REG				(MISC_BASE + 0x0C)
+#define WDOG_STATUS_7_BIT			BIT(7)
+#define WDOG_STATUS_6_BIT			BIT(6)
+#define WDOG_STATUS_5_BIT			BIT(5)
+#define WDOG_STATUS_4_BIT			BIT(4)
+#define WDOG_STATUS_3_BIT			BIT(3)
+#define WDOG_STATUS_2_BIT			BIT(2)
+#define WDOG_STATUS_1_BIT			BIT(1)
+#define BARK_BITE_STATUS_BIT			BIT(0)
+
+#define SYSOK_REASON_STATUS_REG			(MISC_BASE + 0x0D)
+#define SYSOK_REASON_DCIN_BIT			BIT(1)
+#define SYSOK_REASON_USBIN_BIT			BIT(0)
+
+/* MISC Interrupt Bits */
+#define SWITCHER_POWER_OK_RT_STS_BIT		BIT(7)
+#define TEMPERATURE_CHANGE_RT_STS_BIT		BIT(6)
+#define INPUT_CURRENT_LIMITING_RT_STS_BIT	BIT(5)
+#define HIGH_DUTY_CYCLE_RT_STS_BIT		BIT(4)
+#define AICL_DONE_RT_STS_BIT			BIT(3)
+#define AICL_FAIL_RT_STS_BIT			BIT(2)
+#define WDOG_BARK_RT_STS_BIT			BIT(1)
+#define WDOG_SNARL_RT_STS_BIT			BIT(0)
+
+#define WDOG_RST_REG				(MISC_BASE + 0x40)
+#define WDOG_RST_BIT				BIT(0)
+
+#define AFP_MODE_REG				(MISC_BASE + 0x41)
+#define AFP_MODE_EN_BIT				BIT(0)
+
+#define GSM_PA_ON_ADJ_EN_REG			(MISC_BASE + 0x42)
+#define GSM_PA_ON_ADJ_EN_BIT			BIT(0)
+
+#define BARK_BITE_WDOG_PET_REG			(MISC_BASE + 0x43)
+#define BARK_BITE_WDOG_PET_BIT			BIT(0)
+
+#define PHYON_CMD_REG				(MISC_BASE + 0x44)
+#define PHYON_CMD_BIT				BIT(0)
+
+#define SHDN_CMD_REG				(MISC_BASE + 0x45)
+#define SHDN_CMD_BIT				BIT(0)
+
+#define FINISH_COPY_COMMAND_REG			(MISC_BASE + 0x4F)
+#define START_COPY_BIT				BIT(0)
+
+#define WD_CFG_REG				(MISC_BASE + 0x51)
+#define WATCHDOG_TRIGGER_AFP_EN_BIT		BIT(7)
+#define BARK_WDOG_INT_EN_BIT			BIT(6)
+#define BITE_WDOG_INT_EN_BIT			BIT(5)
+#define SFT_AFTER_WDOG_IRQ_MASK			GENMASK(4, 3)
+#define WDOG_IRQ_SFT_BIT			BIT(2)
+#define WDOG_TIMER_EN_ON_PLUGIN_BIT		BIT(1)
+#define WDOG_TIMER_EN_BIT			BIT(0)
+
+#define MISC_CFG_REG				(MISC_BASE + 0x52)
+#define TCC_DEBOUNCE_20MS_BIT			BIT(5)
+#define GSM_PA_ON_ADJ_SEL_BIT			BIT(0)
+
+#define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
+#define BITE_WDOG_DISABLE_CHARGING_CFG_BIT	BIT(7)
+#define SNARL_WDOG_TIMEOUT_MASK			GENMASK(6, 4)
+#define BARK_WDOG_TIMEOUT_MASK			GENMASK(3, 2)
+#define BITE_WDOG_TIMEOUT_MASK			GENMASK(1, 0)
+
+#define PHYON_CFG_REG				(MISC_BASE + 0x54)
+#define USBPHYON_PUSHPULL_CFG_BIT		BIT(1)
+#define PHYON_SW_SEL_BIT			BIT(0)
+
+#define CHGR_TRIM_OPTIONS_7_0_REG		(MISC_BASE + 0x55)
+#define TLIM_DIS_TBIT_BIT			BIT(0)
+
+#define CH_OV_OPTION_CFG_REG			(MISC_BASE + 0x56)
+#define OV_OPTION_TBIT_BIT			BIT(0)
+
+#define AICL_CFG_REG				(MISC_BASE + 0x60)
+#define TREG_ALLOW_DECREASE_BIT			BIT(1)
+#define AICL_HIGH_DC_INC_BIT			BIT(0)
+
+#define AICL_RERUN_TIME_CFG_REG			(MISC_BASE + 0x61)
+#define AICL_RERUN_TIME_MASK			GENMASK(1, 0)
+
+#define AICL_RERUN_TEMP_TIME_CFG_REG		(MISC_BASE + 0x62)
+#define AICL_RERUN_TEMP_TIME_MASK		GENMASK(1, 0)
+
+#define THERMREG_SRC_CFG_REG			(MISC_BASE + 0x70)
+#define SKIN_ADC_CFG_BIT			BIT(3)
+#define THERMREG_SKIN_ADC_SRC_EN_BIT		BIT(2)
+#define THERMREG_DIE_ADC_SRC_EN_BIT		BIT(1)
+#define THERMREG_DIE_CMP_SRC_EN_BIT		BIT(0)
+
+#define TREG_DIE_CMP_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x71)
+#define TREG_DIE_CMP_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_CMP_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x72)
+#define TREG_DIE_CMP_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_ADC_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x73)
+#define TREG_DIE_ADC_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_DIE_ADC_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x74)
+#define TREG_DIE_ADC_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_SKIN_ADC_INC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x75)
+#define TREG_SKIN_ADC_INC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define TREG_SKIN_ADC_DEC_CYCLE_TIME_CFG_REG	(MISC_BASE + 0x76)
+#define TREG_SKIN_ADC_DEC_CYCLE_TIME_MASK	GENMASK(1, 0)
+
+#define BUCK_OPTIONS_CFG_REG			(MISC_BASE + 0x80)
+#define CHG_EN_PIN_SUSPEND_CFG_BIT		BIT(6)
+#define HICCUP_OPTIONS_MASK			GENMASK(5, 4)
+#define INPUT_CURRENT_LIMIT_SOFTSTART_EN_BIT	BIT(3)
+#define HV_HIGH_DUTY_CYCLE_PROTECT_EN_BIT	BIT(2)
+#define BUCK_OC_PROTECT_EN_BIT			BIT(1)
+#define INPUT_MISS_POLL_EN_BIT			BIT(0)
+
+#define ICL_SOFTSTART_RATE_CFG_REG		(MISC_BASE + 0x81)
+#define ICL_SOFTSTART_RATE_MASK			GENMASK(1, 0)
+
+#define ICL_SOFTSTOP_RATE_CFG_REG		(MISC_BASE + 0x82)
+#define ICL_SOFTSTOP_RATE_MASK			GENMASK(1, 0)
+
+#define VSYS_MIN_SEL_CFG_REG			(MISC_BASE + 0x83)
+#define VSYS_MIN_SEL_MASK			GENMASK(1, 0)
+
+#define TRACKING_VOLTAGE_SEL_CFG_REG		(MISC_BASE + 0x84)
+#define TRACKING_VOLTAGE_SEL_BIT		BIT(0)
+
+#define STAT_CFG_REG				(MISC_BASE + 0x90)
+#define STAT_SW_OVERRIDE_VALUE_BIT		BIT(7)
+#define STAT_SW_OVERRIDE_CFG_BIT		BIT(6)
+#define STAT_PARALLEL_OFF_DG_CFG_MASK		GENMASK(5, 4)
+#define STAT_POLARITY_CFG_BIT			BIT(3)
+#define STAT_PARALLEL_CFG_BIT			BIT(2)
+#define STAT_FUNCTION_CFG_BIT			BIT(1)
+#define STAT_IRQ_PULSING_EN_BIT			BIT(0)
+
+#define LBC_EN_CFG_REG				(MISC_BASE + 0x91)
+#define LBC_DURING_CHARGING_CFG_BIT		BIT(1)
+#define LBC_EN_BIT				BIT(0)
+
+#define LBC_PERIOD_CFG_REG			(MISC_BASE + 0x92)
+#define LBC_PERIOD_MASK				GENMASK(2, 0)
+
+#define LBC_DUTY_CYCLE_CFG_REG			(MISC_BASE + 0x93)
+#define LBC_DUTY_CYCLE_MASK			GENMASK(2, 0)
+
+#define SYSOK_CFG_REG				(MISC_BASE + 0x94)
+#define SYSOK_PUSHPULL_CFG_BIT			BIT(5)
+#define SYSOK_B_OR_C_SEL_BIT			BIT(4)
+#define SYSOK_POL_BIT				BIT(3)
+#define SYSOK_OPTIONS_MASK			GENMASK(2, 0)
+
+#define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG	(MISC_BASE + 0xA0)
+#define CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG	(MISC_BASE + 0xA1)
+
+/* CHGR FREQ Peripheral registers */
+#define FREQ_CLK_DIV_REG			(CHGR_FREQ_BASE + 0x50)
+
+#endif /* __SMB2_CHARGER_REG_H */
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
new file mode 100644
index 0000000..0d1f2a6
--- /dev/null
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -0,0 +1,3335 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/i2c.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/pinctrl/consumer.h>
+
+/* Mask/Bit helpers */
+#define _SMB1351_MASK(BITS, POS) \
+	((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB1351_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+		_SMB1351_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+				(RIGHT_BIT_POS))
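+/* e.g. SMB1351_MASK(7, 4) evaluates to 0xF0 (bits 7..4 set) */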
+
+/* Configuration registers */
+#define CHG_CURRENT_CTRL_REG			0x0
+#define FAST_CHG_CURRENT_MASK			SMB1351_MASK(7, 4)
+#define AC_INPUT_CURRENT_LIMIT_MASK		SMB1351_MASK(3, 0)
+
+#define CHG_OTH_CURRENT_CTRL_REG		0x1
+#define PRECHG_CURRENT_MASK			SMB1351_MASK(7, 5)
+#define ITERM_MASK				SMB1351_MASK(4, 2)
+#define USB_2_3_MODE_SEL_BIT			BIT(1)
+#define USB_2_3_MODE_SEL_BY_I2C			0
+#define USB_2_3_MODE_SEL_BY_PIN			0x2
+#define USB_5_1_CMD_POLARITY_BIT		BIT(0)
+#define USB_CMD_POLARITY_500_1_100_0		0
+#define USB_CMD_POLARITY_500_0_100_1		0x1
+
+#define VARIOUS_FUNC_REG			0x2
+#define SUSPEND_MODE_CTRL_BIT			BIT(7)
+#define SUSPEND_MODE_CTRL_BY_PIN		0
+#define SUSPEND_MODE_CTRL_BY_I2C		0x80
+#define BATT_TO_SYS_POWER_CTRL_BIT		BIT(6)
+#define MAX_SYS_VOLTAGE				BIT(5)
+#define AICL_EN_BIT				BIT(4)
+#define AICL_DET_TH_BIT				BIT(3)
+#define APSD_EN_BIT				BIT(2)
+#define BATT_OV_BIT				BIT(1)
+#define VCHG_FUNC_BIT				BIT(0)
+
+#define VFLOAT_REG				0x3
+#define PRECHG_TO_FAST_VOLTAGE_CFG_MASK		SMB1351_MASK(7, 6)
+#define VFLOAT_MASK				SMB1351_MASK(5, 0)
+
+#define CHG_CTRL_REG				0x4
+#define AUTO_RECHG_BIT				BIT(7)
+#define AUTO_RECHG_ENABLE			0
+#define AUTO_RECHG_DISABLE			0x80
+#define ITERM_EN_BIT				BIT(6)
+#define ITERM_ENABLE				0
+#define ITERM_DISABLE				0x40
+#define MAPPED_AC_INPUT_CURRENT_LIMIT_MASK	SMB1351_MASK(5, 4)
+#define AUTO_RECHG_TH_BIT			BIT(3)
+#define AUTO_RECHG_TH_50MV			0
+#define AUTO_RECHG_TH_100MV			0x8
+#define AFCV_MASK				SMB1351_MASK(2, 0)
+
+#define CHG_STAT_TIMERS_CTRL_REG		0x5
+#define STAT_OUTPUT_POLARITY_BIT		BIT(7)
+#define STAT_OUTPUT_MODE_BIT			BIT(6)
+#define STAT_OUTPUT_CTRL_BIT			BIT(5)
+#define OTH_CHG_IL_BIT				BIT(4)
+#define COMPLETE_CHG_TIMEOUT_MASK		SMB1351_MASK(3, 2)
+#define PRECHG_TIMEOUT_MASK			SMB1351_MASK(1, 0)
+
+#define CHG_PIN_EN_CTRL_REG			0x6
+#define LED_BLINK_FUNC_BIT			BIT(7)
+#define EN_PIN_CTRL_MASK			SMB1351_MASK(6, 5)
+#define EN_BY_I2C_0_DISABLE			0
+#define EN_BY_I2C_0_ENABLE			0x20
+#define EN_BY_PIN_HIGH_ENABLE			0x40
+#define EN_BY_PIN_LOW_ENABLE			0x60
+#define USBCS_CTRL_BIT				BIT(4)
+#define USBCS_CTRL_BY_I2C			0
+#define USBCS_CTRL_BY_PIN			0x10
+#define USBCS_INPUT_STATE_BIT			BIT(3)
+#define CHG_ERR_BIT				BIT(2)
+#define APSD_DONE_BIT				BIT(1)
+#define USB_FAIL_BIT				BIT(0)
+
+#define THERM_A_CTRL_REG			0x7
+#define MIN_SYS_VOLTAGE_MASK			SMB1351_MASK(7, 6)
+#define LOAD_BATT_10MA_FVC_BIT			BIT(5)
+#define THERM_MONITOR_BIT			BIT(4)
+#define THERM_MONITOR_EN			0
+#define SOFT_COLD_TEMP_LIMIT_MASK		SMB1351_MASK(3, 2)
+#define SOFT_HOT_TEMP_LIMIT_MASK		SMB1351_MASK(1, 0)
+
+#define WDOG_SAFETY_TIMER_CTRL_REG		0x8
+#define AICL_FAIL_OPTION_BIT			BIT(7)
+#define AICL_FAIL_TO_SUSPEND			0
+#define AICL_FAIL_TO_150_MA			0x80
+#define WDOG_TIMEOUT_MASK			SMB1351_MASK(6, 5)
+#define WDOG_IRQ_SAFETY_TIMER_MASK		SMB1351_MASK(4, 3)
+#define WDOG_IRQ_SAFETY_TIMER_EN_BIT		BIT(2)
+#define WDOG_OPTION_BIT				BIT(1)
+#define WDOG_TIMER_EN_BIT			BIT(0)
+
+#define OTG_USBIN_AICL_CTRL_REG			0x9
+#define OTG_ID_PIN_CTRL_MASK			SMB1351_MASK(7, 6)
+#define OTG_PIN_POLARITY_BIT			BIT(5)
+#define DCIN_IC_GLITCH_FILTER_HV_ADAPTER_MASK	SMB1351_MASK(4, 3)
+#define DCIN_IC_GLITCH_FILTER_LV_ADAPTER_BIT	BIT(2)
+#define USBIN_AICL_CFG1_BIT			BIT(1)
+#define USBIN_AICL_CFG0_BIT			BIT(0)
+
+#define OTG_TLIM_CTRL_REG			0xA
+#define SWITCH_FREQ_MASK			SMB1351_MASK(7, 6)
+#define THERM_LOOP_TEMP_SEL_MASK		SMB1351_MASK(5, 4)
+#define OTG_OC_LIMIT_MASK			SMB1351_MASK(3, 2)
+#define OTG_BATT_UVLO_TH_MASK			SMB1351_MASK(1, 0)
+
+#define HARD_SOFT_LIMIT_CELL_TEMP_REG		0xB
+#define HARD_LIMIT_COLD_TEMP_ALARM_TRIP_MASK	SMB1351_MASK(7, 6)
+#define HARD_LIMIT_HOT_TEMP_ALARM_TRIP_MASK	SMB1351_MASK(5, 4)
+#define SOFT_LIMIT_COLD_TEMP_ALARM_TRIP_MASK	SMB1351_MASK(3, 2)
+#define SOFT_LIMIT_HOT_TEMP_ALARM_TRIP_MASK	SMB1351_MASK(1, 0)
+
+#define FAULT_INT_REG				0xC
+#define HOT_COLD_HARD_LIMIT_BIT			BIT(7)
+#define HOT_COLD_SOFT_LIMIT_BIT			BIT(6)
+#define BATT_UVLO_IN_OTG_BIT			BIT(5)
+#define OTG_OC_BIT				BIT(4)
+#define INPUT_OVLO_BIT				BIT(3)
+#define INPUT_UVLO_BIT				BIT(2)
+#define AICL_DONE_FAIL_BIT			BIT(1)
+#define INTERNAL_OVER_TEMP_BIT			BIT(0)
+
+#define STATUS_INT_REG				0xD
+#define CHG_OR_PRECHG_TIMEOUT_BIT		BIT(7)
+#define RID_CHANGE_BIT				BIT(6)
+#define BATT_OVP_BIT				BIT(5)
+#define FAST_TERM_TAPER_RECHG_INHIBIT_BIT	BIT(4)
+#define WDOG_TIMER_BIT				BIT(3)
+#define POK_BIT					BIT(2)
+#define BATT_MISSING_BIT			BIT(1)
+#define BATT_LOW_BIT				BIT(0)
+
+#define VARIOUS_FUNC_2_REG			0xE
+#define CHG_HOLD_OFF_TIMER_AFTER_PLUGIN_BIT	BIT(7)
+#define CHG_INHIBIT_BIT				BIT(6)
+#define FAST_CHG_CC_IN_BATT_SOFT_LIMIT_MODE_BIT	BIT(5)
+#define FVCL_IN_BATT_SOFT_LIMIT_MODE_MASK	SMB1351_MASK(4, 3)
+#define HARD_TEMP_LIMIT_BEHAVIOR_BIT		BIT(2)
+#define PRECHG_TO_FASTCHG_BIT			BIT(1)
+#define STAT_PIN_CONFIG_BIT			BIT(0)
+
+#define FLEXCHARGER_REG				0x10
+#define AFVC_IRQ_BIT				BIT(7)
+#define CHG_CONFIG_MASK				SMB1351_MASK(6, 4)
+#define LOW_BATT_VOLTAGE_DET_TH_MASK		SMB1351_MASK(3, 0)
+
+#define VARIOUS_FUNC_3_REG			0x11
+#define SAFETY_TIMER_EN_MASK			SMB1351_MASK(7, 6)
+#define BLOCK_SUSPEND_DURING_VBATT_LOW_BIT	BIT(5)
+#define TIMEOUT_SEL_FOR_APSD_BIT		BIT(4)
+#define SDP_SUSPEND_BIT				BIT(3)
+#define QC_2P1_AUTO_INCREMENT_MODE_BIT		BIT(2)
+#define QC_2P1_AUTH_ALGO_BIT			BIT(1)
+#define DCD_EN_BIT				BIT(0)
+
+#define HVDCP_BATT_MISSING_CTRL_REG		0x12
+#define HVDCP_ADAPTER_SEL_MASK			SMB1351_MASK(7, 6)
+#define HVDCP_EN_BIT				BIT(5)
+#define HVDCP_AUTO_INCREMENT_LIMIT_BIT		BIT(4)
+#define BATT_MISSING_ON_INPUT_PLUGIN_BIT	BIT(3)
+#define BATT_MISSING_2P6S_POLLER_BIT		BIT(2)
+#define BATT_MISSING_ALGO_BIT			BIT(1)
+#define BATT_MISSING_THERM_PIN_SOURCE_BIT	BIT(0)
+
+#define PON_OPTIONS_REG				0x13
+#define SYSOK_INOK_POLARITY_BIT			BIT(7)
+#define SYSOK_OPTIONS_MASK			SMB1351_MASK(6, 4)
+#define INPUT_MISSING_POLLER_CONFIG_BIT		BIT(3)
+#define VBATT_LOW_DISABLED_OR_RESET_STATE_BIT	BIT(2)
+#define QC_2P1_AUTH_ALGO_IRQ_EN_BIT		BIT(0)
+
+#define OTG_MODE_POWER_OPTIONS_REG		0x14
+#define ADAPTER_CONFIG_MASK			SMB1351_MASK(7, 6)
+#define MAP_HVDCP_BIT				BIT(5)
+#define SDP_LOW_BATT_FORCE_USB5_OVER_USB1_BIT	BIT(4)
+#define OTG_HICCUP_MODE_BIT			BIT(2)
+#define INPUT_CURRENT_LIMIT_MASK		SMB1351_MASK(1, 0)
+
+#define CHARGER_I2C_CTRL_REG			0x15
+#define FULLON_MODE_EN_BIT			BIT(7)
+#define I2C_HS_MODE_EN_BIT			BIT(6)
+#define SYSON_LDO_OUTPUT_SEL_BIT		BIT(5)
+#define VBATT_TRACKING_VOLTAGE_DIFF_BIT		BIT(4)
+#define DISABLE_AFVC_WHEN_ENTER_TAPER_BIT	BIT(3)
+#define VCHG_IINV_BIT				BIT(2)
+#define AFVC_OVERRIDE_BIT			BIT(1)
+#define SYSOK_PIN_CONFIG_BIT			BIT(0)
+
+#define VERSION_REG				0x2E
+#define VERSION_MASK				BIT(1)
+
+/* Command registers */
+#define CMD_I2C_REG				0x30
+#define CMD_RELOAD_BIT				BIT(7)
+#define CMD_BQ_CFG_ACCESS_BIT			BIT(6)
+
+#define CMD_INPUT_LIMIT_REG			0x31
+#define CMD_OVERRIDE_BIT			BIT(7)
+#define CMD_SUSPEND_MODE_BIT			BIT(6)
+#define CMD_INPUT_CURRENT_MODE_BIT		BIT(3)
+#define CMD_INPUT_CURRENT_MODE_APSD		0
+#define CMD_INPUT_CURRENT_MODE_CMD		0x08
+#define CMD_USB_2_3_SEL_BIT			BIT(2)
+#define CMD_USB_2_MODE				0
+#define CMD_USB_3_MODE				0x4
+#define CMD_USB_1_5_AC_CTRL_MASK		SMB1351_MASK(1, 0)
+#define CMD_USB_100_MODE			0
+#define CMD_USB_500_MODE			0x2
+#define CMD_USB_AC_MODE				0x1
+
+#define CMD_CHG_REG				0x32
+#define CMD_DISABLE_THERM_MONITOR_BIT		BIT(4)
+#define CMD_TURN_OFF_STAT_PIN_BIT		BIT(3)
+#define CMD_PRE_TO_FAST_EN_BIT			BIT(2)
+#define CMD_CHG_EN_BIT				BIT(1)
+#define CMD_CHG_DISABLE				0
+#define CMD_CHG_ENABLE				0x2
+#define CMD_OTG_EN_BIT				BIT(0)
+
+#define CMD_DEAD_BATT_REG			0x33
+#define CMD_STOP_DEAD_BATT_TIMER_MASK		SMB1351_MASK(7, 0)
+
+#define CMD_HVDCP_REG				0x34
+#define CMD_APSD_RE_RUN_BIT			BIT(7)
+#define CMD_FORCE_HVDCP_2P0_BIT			BIT(5)
+#define CMD_HVDCP_MODE_MASK			SMB1351_MASK(5, 0)
+
+/* Status registers */
+#define STATUS_0_REG				0x36
+#define STATUS_AICL_BIT				BIT(7)
+#define STATUS_INPUT_CURRENT_LIMIT_MASK		SMB1351_MASK(6, 5)
+#define STATUS_DCIN_INPUT_CURRENT_LIMIT_MASK	SMB1351_MASK(4, 0)
+
+#define STATUS_1_REG				0x37
+#define STATUS_INPUT_RANGE_MASK			SMB1351_MASK(7, 4)
+#define STATUS_INPUT_USB_BIT			BIT(0)
+
+#define STATUS_2_REG				0x38
+#define STATUS_FAST_CHG_BIT			BIT(7)
+#define STATUS_HARD_LIMIT_BIT			BIT(6)
+#define STATUS_FLOAT_VOLTAGE_MASK		SMB1351_MASK(5, 0)
+
+#define STATUS_3_REG				0x39
+#define STATUS_CHG_BIT				BIT(7)
+#define STATUS_PRECHG_CURRENT_MASK		SMB1351_MASK(6, 4)
+#define STATUS_FAST_CHG_CURRENT_MASK		SMB1351_MASK(3, 0)
+
+#define STATUS_4_REG				0x3A
+#define STATUS_OTG_BIT				BIT(7)
+#define STATUS_AFVC_BIT				BIT(6)
+#define STATUS_DONE_BIT				BIT(5)
+#define STATUS_BATT_LESS_THAN_2V_BIT		BIT(4)
+#define STATUS_HOLD_OFF_BIT			BIT(3)
+#define STATUS_CHG_MASK				SMB1351_MASK(2, 1)
+#define STATUS_NO_CHARGING			0
+#define STATUS_FAST_CHARGING			0x4
+#define STATUS_PRE_CHARGING			0x2
+#define STATUS_TAPER_CHARGING			0x6
+#define STATUS_CHG_EN_STATUS_BIT		BIT(0)
+
+#define STATUS_5_REG				0x3B
+#define STATUS_SOURCE_DETECTED_MASK		SMB1351_MASK(7, 0)
+#define STATUS_PORT_CDP				0x80
+#define STATUS_PORT_DCP				0x40
+#define STATUS_PORT_OTHER			0x20
+#define STATUS_PORT_SDP				0x10
+#define STATUS_PORT_ACA_A			0x8
+#define STATUS_PORT_ACA_B			0x4
+#define STATUS_PORT_ACA_C			0x2
+#define STATUS_PORT_ACA_DOCK			0x1
+
+#define STATUS_6_REG				0x3C
+#define STATUS_DCD_TIMEOUT_BIT			BIT(7)
+#define STATUS_DCD_GOOD_DG_BIT			BIT(6)
+#define STATUS_OCD_GOOD_DG_BIT			BIT(5)
+#define STATUS_RID_ABD_DG_BIT			BIT(4)
+#define STATUS_RID_FLOAT_STATE_MACHINE_BIT	BIT(3)
+#define STATUS_RID_A_STATE_MACHINE_BIT		BIT(2)
+#define STATUS_RID_B_STATE_MACHINE_BIT		BIT(1)
+#define STATUS_RID_C_STATE_MACHINE_BIT		BIT(0)
+
+#define STATUS_7_REG				0x3D
+#define STATUS_HVDCP_MASK			SMB1351_MASK(7, 0)
+
+#define STATUS_8_REG				0x3E
+#define STATUS_USNIN_HV_INPUT_SEL_BIT		BIT(5)
+#define STATUS_USBIN_LV_UNDER_INPUT_SEL_BIT	BIT(4)
+#define STATUS_USBIN_LV_INPUT_SEL_BIT		BIT(3)
+
+/* Revision register */
+#define CHG_REVISION_REG			0x3F
+#define GUI_REVISION_MASK			SMB1351_MASK(7, 4)
+#define DEVICE_REVISION_MASK			SMB1351_MASK(3, 0)
+
+/* IRQ status registers */
+#define IRQ_A_REG				0x40
+#define IRQ_HOT_HARD_BIT			BIT(6)
+#define IRQ_COLD_HARD_BIT			BIT(4)
+#define IRQ_HOT_SOFT_BIT			BIT(2)
+#define IRQ_COLD_SOFT_BIT			BIT(0)
+
+#define IRQ_B_REG				0x41
+#define IRQ_BATT_TERMINAL_REMOVED_BIT		BIT(6)
+#define IRQ_BATT_MISSING_BIT			BIT(4)
+#define IRQ_LOW_BATT_VOLTAGE_BIT		BIT(2)
+#define IRQ_INTERNAL_TEMP_LIMIT_BIT		BIT(0)
+
+#define IRQ_C_REG				0x42
+#define IRQ_PRE_TO_FAST_VOLTAGE_BIT		BIT(6)
+#define IRQ_RECHG_BIT				BIT(4)
+#define IRQ_TAPER_BIT				BIT(2)
+#define IRQ_TERM_BIT				BIT(0)
+
+#define IRQ_D_REG				0x43
+#define IRQ_BATT_OV_BIT				BIT(6)
+#define IRQ_CHG_ERROR_BIT			BIT(4)
+#define IRQ_CHG_TIMEOUT_BIT			BIT(2)
+#define IRQ_PRECHG_TIMEOUT_BIT			BIT(0)
+
+#define IRQ_E_REG				0x44
+#define IRQ_USBIN_OV_BIT			BIT(6)
+#define IRQ_USBIN_UV_BIT			BIT(4)
+#define IRQ_AFVC_BIT				BIT(2)
+#define IRQ_POWER_OK_BIT			BIT(0)
+
+#define IRQ_F_REG				0x45
+#define IRQ_OTG_OVER_CURRENT_BIT		BIT(6)
+#define IRQ_OTG_FAIL_BIT			BIT(4)
+#define IRQ_RID_BIT				BIT(2)
+#define IRQ_OTG_OC_RETRY_BIT			BIT(0)
+
+#define IRQ_G_REG				0x46
+#define IRQ_SOURCE_DET_BIT			BIT(6)
+#define IRQ_AICL_DONE_BIT			BIT(4)
+#define IRQ_AICL_FAIL_BIT			BIT(2)
+#define IRQ_CHG_INHIBIT_BIT			BIT(0)
+
+#define IRQ_H_REG				0x47
+#define IRQ_IC_LIMIT_STATUS_BIT			BIT(5)
+#define IRQ_HVDCP_2P1_STATUS_BIT		BIT(4)
+#define IRQ_HVDCP_AUTH_DONE_BIT			BIT(2)
+#define IRQ_WDOG_TIMEOUT_BIT			BIT(0)
+
+/* constants */
+#define USB2_MIN_CURRENT_MA			100
+#define USB2_MAX_CURRENT_MA			500
+#define USB3_MIN_CURRENT_MA			150
+#define USB3_MAX_CURRENT_MA			900
+#define SMB1351_IRQ_REG_COUNT			8
+#define SMB1351_CHG_PRE_MIN_MA			100
+#define SMB1351_CHG_FAST_MIN_MA			1000
+#define SMB1351_CHG_FAST_MAX_MA			4500
+#define SMB1351_CHG_PRE_SHIFT			5
+#define SMB1351_CHG_FAST_SHIFT			4
+#define DEFAULT_BATT_CAPACITY			50
+#define DEFAULT_BATT_TEMP			250
+#define SUSPEND_CURRENT_MA			2
+
+#define CHG_ITERM_200MA				0x0
+#define CHG_ITERM_300MA				0x04
+#define CHG_ITERM_400MA				0x08
+#define CHG_ITERM_500MA				0x0C
+#define CHG_ITERM_600MA				0x10
+#define CHG_ITERM_700MA				0x14
+
+#define ADC_TM_WARM_COOL_THR_ENABLE		ADC_TM_HIGH_LOW_THR_ENABLE
+
+enum reason {
+	USER	= BIT(0),
+	THERMAL = BIT(1),
+	CURRENT = BIT(2),
+	SOC	= BIT(3),
+};
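+/*
+ * The reason bits above are OR-ed into usb_suspended_status and
+ * battchg_disabled_status; charging stays suspended/disabled while
+ * any reason bit is still set.
+ */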
+
+static char *pm_batt_supplied_to[] = {
+	"bms",
+};
+
+struct smb1351_regulator {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+};
+
+enum chip_version {
+	SMB_UNKNOWN = 0,
+	SMB1350,
+	SMB1351,
+	SMB_MAX_TYPE,
+};
+
+static const char *smb1351_version_str[SMB_MAX_TYPE] = {
+	[SMB_UNKNOWN] = "Unknown",
+	[SMB1350] = "SMB1350",
+	[SMB1351] = "SMB1351",
+};
+
+struct smb1351_charger {
+	struct i2c_client	*client;
+	struct device		*dev;
+
+	bool			recharge_disabled;
+	int			recharge_mv;
+	bool			iterm_disabled;
+	int			iterm_ma;
+	int			vfloat_mv;
+	int			chg_present;
+	int			fake_battery_soc;
+	bool			chg_autonomous_mode;
+	bool			disable_apsd;
+	bool			using_pmic_therm;
+	bool			jeita_supported;
+	bool			battery_missing;
+	const char		*bms_psy_name;
+	bool			resume_completed;
+	bool			irq_waiting;
+	struct delayed_work	chg_remove_work;
+	struct delayed_work	hvdcp_det_work;
+
+	/* status tracking */
+	bool			batt_full;
+	bool			batt_hot;
+	bool			batt_cold;
+	bool			batt_warm;
+	bool			batt_cool;
+
+	int			battchg_disabled_status;
+	int			usb_suspended_status;
+	int			target_fastchg_current_max_ma;
+	int			fastchg_current_max_ma;
+	int			workaround_flags;
+
+	int			parallel_pin_polarity_setting;
+	int			parallel_mode;
+	bool			parallel_charger;
+	bool			parallel_charger_suspended;
+	bool			bms_controlled_charging;
+	bool			apsd_rerun;
+	bool			usbin_ov;
+	bool			chg_remove_work_scheduled;
+	bool			force_hvdcp_2p0;
+	enum chip_version	version;
+
+	/* psy */
+	struct power_supply	*usb_psy;
+	int			usb_psy_ma;
+	struct power_supply	*bms_psy;
+	struct power_supply_desc	batt_psy_d;
+	struct power_supply	*batt_psy;
+	struct power_supply	*parallel_psy;
+	struct power_supply_desc	parallel_psy_d;
+
+	struct smb1351_regulator	otg_vreg;
+	struct mutex		irq_complete;
+
+	struct dentry		*debug_root;
+	u32			peek_poke_address;
+
+	/* adc_tm parameters */
+	struct qpnp_vadc_chip	*vadc_dev;
+	struct qpnp_adc_tm_chip	*adc_tm_dev;
+	struct qpnp_adc_tm_btm_param	adc_param;
+
+	/* jeita parameters */
+	int			batt_hot_decidegc;
+	int			batt_cold_decidegc;
+	int			batt_warm_decidegc;
+	int			batt_cool_decidegc;
+	int			batt_missing_decidegc;
+	unsigned int		batt_warm_ma;
+	unsigned int		batt_warm_mv;
+	unsigned int		batt_cool_ma;
+	unsigned int		batt_cool_mv;
+
+	/* pinctrl parameters */
+	const char		*pinctrl_state_name;
+	struct pinctrl		*smb_pinctrl;
+};
+
+struct smb_irq_info {
+	const char		*name;
+	int (*smb_irq)(struct smb1351_charger *chip, u8 rt_stat);
+	int			high;
+	int			low;
+};
+
+struct irq_handler_info {
+	u8			stat_reg;
+	u8			val;
+	u8			prev_val;
+	struct smb_irq_info	irq_info[4];
+};
+
+/* USB input charge current */
+static int usb_chg_current[] = {
+	500, 685, 1000, 1100, 1200, 1300, 1500, 1600,
+	1700, 1800, 2000, 2200, 2500, 3000,
+};
+
+static int fast_chg_current[] = {
+	1000, 1200, 1400, 1600, 1800, 2000, 2200,
+	2400, 2600, 2800, 3000, 3400, 3600, 3800,
+	4000, 4640,
+};
+
+static int pre_chg_current[] = {
+	200, 300, 400, 500, 600, 700,
+};
+
+struct battery_status {
+	bool			batt_hot;
+	bool			batt_warm;
+	bool			batt_cool;
+	bool			batt_cold;
+	bool			batt_present;
+};
+
+enum {
+	BATT_HOT = 0,
+	BATT_WARM,
+	BATT_NORMAL,
+	BATT_COOL,
+	BATT_COLD,
+	BATT_MISSING,
+	BATT_STATUS_MAX,
+};
+
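+/* initializer order: {batt_hot, batt_warm, batt_cool, batt_cold, batt_present} */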
+static struct battery_status batt_s[] = {
+	[BATT_HOT] = {1, 0, 0, 0, 1},
+	[BATT_WARM] = {0, 1, 0, 0, 1},
+	[BATT_NORMAL] = {0, 0, 0, 0, 1},
+	[BATT_COOL] = {0, 0, 1, 0, 1},
+	[BATT_COLD] = {0, 0, 0, 1, 1},
+	[BATT_MISSING] = {0, 0, 0, 1, 0},
+};
+
+static int smb1351_read_reg(struct smb1351_charger *chip, int reg, u8 *val)
+{
+	s32 ret;
+
+	pm_stay_awake(chip->dev);
+	ret = i2c_smbus_read_byte_data(chip->client, reg);
+	if (ret < 0) {
+		pr_err("i2c read fail: can't read from %02x: %d\n", reg, ret);
+		pm_relax(chip->dev);
+		return ret;
+	}
+
+	*val = ret;
+
+	pm_relax(chip->dev);
+	pr_debug("Reading 0x%02x=0x%02x\n", reg, *val);
+	return 0;
+}
+
+static int smb1351_write_reg(struct smb1351_charger *chip, int reg, u8 val)
+{
+	s32 ret;
+
+	pm_stay_awake(chip->dev);
+	ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+	if (ret < 0) {
+		pr_err("i2c write fail: can't write %02x to %02x: %d\n",
+			val, reg, ret);
+		pm_relax(chip->dev);
+		return ret;
+	}
+	pm_relax(chip->dev);
+	pr_debug("Writing 0x%02x=0x%02x\n", reg, val);
+	return 0;
+}
+
+static int smb1351_masked_write(struct smb1351_charger *chip, int reg,
+							u8 mask, u8 val)
+{
+	s32 rc;
+	u8 temp;
+
+	rc = smb1351_read_reg(chip, reg, &temp);
+	if (rc) {
+		pr_err("read failed: reg=%03X, rc=%d\n", reg, rc);
+		return rc;
+	}
+	temp &= ~mask;
+	temp |= val & mask;
+	rc = smb1351_write_reg(chip, reg, temp);
+	if (rc) {
+		pr_err("write failed: reg=%03X, rc=%d\n", reg, rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int smb1351_enable_volatile_writes(struct smb1351_charger *chip)
+{
+	int rc;
+
+	rc = smb1351_masked_write(chip, CMD_I2C_REG, CMD_BQ_CFG_ACCESS_BIT,
+							CMD_BQ_CFG_ACCESS_BIT);
+	if (rc)
+		pr_err("Couldn't write CMD_BQ_CFG_ACCESS_BIT rc=%d\n", rc);
+
+	return rc;
+}
+
+static int smb1351_usb_suspend(struct smb1351_charger *chip, int reason,
+					bool suspend)
+{
+	int rc = 0;
+	int suspended;
+
+	suspended = chip->usb_suspended_status;
+
+	pr_debug("reason = %d requested_suspend = %d suspended_status = %d\n",
+						reason, suspend, suspended);
+
+	if (suspend == false)
+		suspended &= ~reason;
+	else
+		suspended |= reason;
+
+	pr_debug("new suspended_status = %d\n", suspended);
+
+	rc = smb1351_masked_write(chip, CMD_INPUT_LIMIT_REG,
+				CMD_SUSPEND_MODE_BIT,
+				suspended ? CMD_SUSPEND_MODE_BIT : 0);
+	if (rc)
+		pr_err("Couldn't suspend rc = %d\n", rc);
+	else
+		chip->usb_suspended_status = suspended;
+
+	return rc;
+}
+
+static int smb1351_battchg_disable(struct smb1351_charger *chip,
+					int reason, int disable)
+{
+	int rc = 0;
+	int disabled;
+
+	if (chip->chg_autonomous_mode) {
+		pr_debug("Charger in autonomous mode\n");
+		return 0;
+	}
+
+	disabled = chip->battchg_disabled_status;
+
+	pr_debug("reason = %d requested_disable = %d disabled_status = %d\n",
+						reason, disable, disabled);
+	if (disable == true)
+		disabled |= reason;
+	else
+		disabled &= ~reason;
+
+	pr_debug("new disabled_status = %d\n", disabled);
+
+	rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_CHG_EN_BIT,
+					disabled ? 0 : CMD_CHG_ENABLE);
+	if (rc)
+		pr_err("Couldn't %s charging rc=%d\n",
+					disable ? "disable" : "enable", rc);
+	else
+		chip->battchg_disabled_status = disabled;
+
+	return rc;
+}
+
+static int smb1351_fastchg_current_set(struct smb1351_charger *chip,
+					unsigned int fastchg_current)
+{
+	int i, rc;
+	bool is_pre_chg = false;
+
+	if ((fastchg_current < SMB1351_CHG_PRE_MIN_MA) ||
+		(fastchg_current > SMB1351_CHG_FAST_MAX_MA)) {
+		pr_err("bad pre_fastchg current mA=%d asked to set\n",
+					fastchg_current);
+		return -EINVAL;
+	}
+
+	/*
+	 * The fast charge current cannot be set below 1000 mA, so use
+	 * the pre-charge current instead for parallel charging.
+	 */
+	if (fastchg_current < SMB1351_CHG_FAST_MIN_MA) {
+		is_pre_chg = true;
+		pr_debug("is_pre_chg true, current is %d\n", fastchg_current);
+	}
+
+	if (is_pre_chg) {
+		/* set prechg current */
+		for (i = ARRAY_SIZE(pre_chg_current) - 1; i >= 0; i--) {
+			if (pre_chg_current[i] <= fastchg_current)
+				break;
+		}
+		if (i < 0)
+			i = 0;
+		chip->fastchg_current_max_ma = pre_chg_current[i];
+		pr_debug("prechg setting %02x\n", i);
+
+		i = i << SMB1351_CHG_PRE_SHIFT;
+
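+		/* the shifted index lands in PRECHG_CURRENT_MASK (bits 7:5) */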
+		rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG,
+				PRECHG_CURRENT_MASK, i);
+		if (rc)
+			pr_err("Couldn't write CHG_OTH_CURRENT_CTRL_REG rc=%d\n",
+									rc);
+
+		return smb1351_masked_write(chip, VARIOUS_FUNC_2_REG,
+				PRECHG_TO_FASTCHG_BIT, PRECHG_TO_FASTCHG_BIT);
+	} else {
+		if (chip->version == SMB_UNKNOWN)
+			return -EINVAL;
+
+		/* SMB1350 supports FCC up to 2600 mA */
+		if (chip->version == SMB1350 && fastchg_current > 2600)
+			fastchg_current = 2600;
+
+		/* set fastchg current */
+		for (i = ARRAY_SIZE(fast_chg_current) - 1; i >= 0; i--) {
+			if (fast_chg_current[i] <= fastchg_current)
+				break;
+		}
+		if (i < 0)
+			i = 0;
+		chip->fastchg_current_max_ma = fast_chg_current[i];
+
+		i = i << SMB1351_CHG_FAST_SHIFT;
+		pr_debug("fastchg limit=%d setting %02x\n",
+					chip->fastchg_current_max_ma, i);
+
+		/* make sure pre chg mode is disabled */
+		rc = smb1351_masked_write(chip, VARIOUS_FUNC_2_REG,
+					PRECHG_TO_FASTCHG_BIT, 0);
+		if (rc)
+			pr_err("Couldn't write VARIOUS_FUNC_2_REG rc=%d\n", rc);
+
+		return smb1351_masked_write(chip, CHG_CURRENT_CTRL_REG,
+					FAST_CHG_CURRENT_MASK, i);
+	}
+}
+
+#define MIN_FLOAT_MV		3500
+#define MAX_FLOAT_MV		4500
+#define VFLOAT_STEP_MV		20
+
+static int smb1351_float_voltage_set(struct smb1351_charger *chip,
+								int vfloat_mv)
+{
+	u8 temp;
+
+	if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+		pr_err("bad float voltage mv =%d asked to set\n", vfloat_mv);
+		return -EINVAL;
+	}
+
+	temp = (vfloat_mv - MIN_FLOAT_MV) / VFLOAT_STEP_MV;
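+	/* e.g. vfloat_mv = 4400 -> (4400 - 3500) / 20 = 45 (0x2D) */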
+
+	return smb1351_masked_write(chip, VFLOAT_REG, VFLOAT_MASK, temp);
+}
+
+static int smb1351_iterm_set(struct smb1351_charger *chip, int iterm_ma)
+{
+	int rc;
+	u8 reg;
+
+	if (iterm_ma <= 200)
+		reg = CHG_ITERM_200MA;
+	else if (iterm_ma <= 300)
+		reg = CHG_ITERM_300MA;
+	else if (iterm_ma <= 400)
+		reg = CHG_ITERM_400MA;
+	else if (iterm_ma <= 500)
+		reg = CHG_ITERM_500MA;
+	else if (iterm_ma <= 600)
+		reg = CHG_ITERM_600MA;
+	else
+		reg = CHG_ITERM_700MA;
+
+	rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG,
+				ITERM_MASK, reg);
+	if (rc) {
+		pr_err("Couldn't set iterm rc = %d\n", rc);
+		return rc;
+	}
+	/* enable the iterm */
+	rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+				ITERM_EN_BIT, ITERM_ENABLE);
+	if (rc) {
+		pr_err("Couldn't enable iterm rc = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int smb1351_chg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+	rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT,
+							CMD_OTG_EN_BIT);
+	if (rc)
+		pr_err("Couldn't enable OTG mode rc=%d\n", rc);
+	return rc;
+}
+
+static int smb1351_chg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+	rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT, 0);
+	if (rc)
+		pr_err("Couldn't disable OTG mode rc=%d\n", rc);
+	return rc;
+}
+
+static int smb1351_chg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	u8 reg = 0;
+	struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+	rc = smb1351_read_reg(chip, CMD_CHG_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read OTG enable bit rc=%d\n", rc);
+		return rc;
+	}
+
+	return (reg & CMD_OTG_EN_BIT) ? 1 : 0;
+}
+
+struct regulator_ops smb1351_chg_otg_reg_ops = {
+	.enable		= smb1351_chg_otg_regulator_enable,
+	.disable	= smb1351_chg_otg_regulator_disable,
+	.is_enabled	= smb1351_chg_otg_regulator_is_enable,
+};
+
+static int smb1351_regulator_init(struct smb1351_charger *chip)
+{
+	int rc = 0;
+	struct regulator_config cfg = {};
+
+	chip->otg_vreg.rdesc.owner = THIS_MODULE;
+	chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+	chip->otg_vreg.rdesc.ops = &smb1351_chg_otg_reg_ops;
+	chip->otg_vreg.rdesc.name =
+		chip->dev->of_node->name;
+	chip->otg_vreg.rdesc.of_match =
+		chip->dev->of_node->name;
+
+	cfg.dev = chip->dev;
+	cfg.driver_data = chip;
+
+	chip->otg_vreg.rdev = regulator_register(
+					&chip->otg_vreg.rdesc, &cfg);
+	if (IS_ERR(chip->otg_vreg.rdev)) {
+		rc = PTR_ERR(chip->otg_vreg.rdev);
+		chip->otg_vreg.rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("OTG reg failed, rc=%d\n", rc);
+	}
+	return rc;
+}
+
+static int smb_chip_get_version(struct smb1351_charger *chip)
+{
+	u8 ver;
+	int rc = 0;
+
+	if (chip->version == SMB_UNKNOWN) {
+		rc = smb1351_read_reg(chip, VERSION_REG, &ver);
+		if (rc) {
+			pr_err("Couldn't read version rc=%d\n", rc);
+			return rc;
+		}
+
+		/* If bit 1 is set, it is SMB1350 */
+		if (ver & VERSION_MASK)
+			chip->version = SMB1350;
+		else
+			chip->version = SMB1351;
+	}
+
+	return rc;
+}
+
+static int smb1351_hw_init(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg = 0, mask = 0;
+
+	/* configure smb_pinctrl to enable irqs */
+	if (chip->pinctrl_state_name) {
+		chip->smb_pinctrl = pinctrl_get_select(chip->dev,
+						chip->pinctrl_state_name);
+		if (IS_ERR(chip->smb_pinctrl)) {
+			pr_err("Could not get/set %s pinctrl state rc = %ld\n",
+						chip->pinctrl_state_name,
+						PTR_ERR(chip->smb_pinctrl));
+			return PTR_ERR(chip->smb_pinctrl);
+		}
+	}
+
+	/*
+	 * If the charger is pre-configured for autonomous operation,
+	 * do not apply additional settings
+	 */
+	if (chip->chg_autonomous_mode) {
+		pr_debug("Charger configured for autonomous mode\n");
+		return 0;
+	}
+
+	rc = smb_chip_get_version(chip);
+	if (rc) {
+		pr_err("Couldn't get version rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = smb1351_enable_volatile_writes(chip);
+	if (rc) {
+		pr_err("Couldn't configure volatile writes rc=%d\n", rc);
+		return rc;
+	}
+
+	/* setup battery missing source */
+	reg = BATT_MISSING_THERM_PIN_SOURCE_BIT;
+	mask = BATT_MISSING_THERM_PIN_SOURCE_BIT;
+	rc = smb1351_masked_write(chip, HVDCP_BATT_MISSING_CTRL_REG,
+								mask, reg);
+	if (rc) {
+		pr_err("Couldn't set HVDCP_BATT_MISSING_CTRL_REG rc=%d\n", rc);
+		return rc;
+	}
+	/* setup defaults for CHG_PIN_EN_CTRL_REG */
+	reg = EN_BY_I2C_0_DISABLE | USBCS_CTRL_BY_I2C | CHG_ERR_BIT |
+		APSD_DONE_BIT | LED_BLINK_FUNC_BIT;
+	mask = EN_PIN_CTRL_MASK | USBCS_CTRL_BIT | CHG_ERR_BIT |
+		APSD_DONE_BIT | LED_BLINK_FUNC_BIT;
+	rc = smb1351_masked_write(chip, CHG_PIN_EN_CTRL_REG, mask, reg);
+	if (rc) {
+		pr_err("Couldn't set CHG_PIN_EN_CTRL_REG rc=%d\n", rc);
+		return rc;
+	}
+	/* setup USB 2.0/3.0 detection and USB 500/100 command polarity */
+	reg = USB_2_3_MODE_SEL_BY_I2C | USB_CMD_POLARITY_500_1_100_0;
+	mask = USB_2_3_MODE_SEL_BIT | USB_5_1_CMD_POLARITY_BIT;
+	rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG, mask, reg);
+	if (rc) {
+		pr_err("Couldn't set CHG_OTH_CURRENT_CTRL_REG rc=%d\n", rc);
+		return rc;
+	}
+	/* setup USB suspend, AICL and APSD */
+	reg = SUSPEND_MODE_CTRL_BY_I2C | AICL_EN_BIT;
+	if (!chip->disable_apsd)
+		reg |= APSD_EN_BIT;
+	mask = SUSPEND_MODE_CTRL_BIT | AICL_EN_BIT | APSD_EN_BIT;
+	rc = smb1351_masked_write(chip, VARIOUS_FUNC_REG, mask, reg);
+	if (rc) {
+		pr_err("Couldn't set VARIOUS_FUNC_REG rc=%d\n",	rc);
+		return rc;
+	}
+	/* Fault and Status IRQ configuration */
+	reg = HOT_COLD_HARD_LIMIT_BIT | HOT_COLD_SOFT_LIMIT_BIT
+		| INPUT_OVLO_BIT | INPUT_UVLO_BIT | AICL_DONE_FAIL_BIT;
+	rc = smb1351_write_reg(chip, FAULT_INT_REG, reg);
+	if (rc) {
+		pr_err("Couldn't set FAULT_INT_REG rc=%d\n", rc);
+		return rc;
+	}
+	reg = CHG_OR_PRECHG_TIMEOUT_BIT | BATT_OVP_BIT |
+		FAST_TERM_TAPER_RECHG_INHIBIT_BIT |
+		BATT_MISSING_BIT | BATT_LOW_BIT;
+	rc = smb1351_write_reg(chip, STATUS_INT_REG, reg);
+	if (rc) {
+		pr_err("Couldn't set STATUS_INT_REG rc=%d\n", rc);
+		return rc;
+	}
+	/* setup THERM Monitor */
+	if (!chip->using_pmic_therm) {
+		rc = smb1351_masked_write(chip, THERM_A_CTRL_REG,
+			THERM_MONITOR_BIT, THERM_MONITOR_EN);
+		if (rc) {
+			pr_err("Couldn't set THERM_A_CTRL_REG rc=%d\n",	rc);
+			return rc;
+		}
+	}
+	/* set the fast charge current limit */
+	rc = smb1351_fastchg_current_set(chip,
+			chip->target_fastchg_current_max_ma);
+	if (rc) {
+		pr_err("Couldn't set fastchg current rc=%d\n", rc);
+		return rc;
+	}
+
+	/* set the float voltage */
+	if (chip->vfloat_mv != -EINVAL) {
+		rc = smb1351_float_voltage_set(chip, chip->vfloat_mv);
+		if (rc) {
+			pr_err("Couldn't set float voltage rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	/* set iterm */
+	if (chip->iterm_ma != -EINVAL) {
+		if (chip->iterm_disabled) {
+			pr_err("Error: Both iterm_disabled and iterm_ma set\n");
+			return -EINVAL;
+		}
+		rc = smb1351_iterm_set(chip, chip->iterm_ma);
+		if (rc) {
+			pr_err("Couldn't set iterm rc = %d\n", rc);
+			return rc;
+		}
+	} else if (chip->iterm_disabled) {
+		rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+					ITERM_EN_BIT, ITERM_DISABLE);
+		if (rc) {
+			pr_err("Couldn't set iterm rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	/* set recharge-threshold */
+	if (chip->recharge_mv != -EINVAL) {
+		if (chip->recharge_disabled) {
+			pr_err("Error: Both recharge_disabled and recharge_mv set\n");
+			return -EINVAL;
+		}
+
+		reg = AUTO_RECHG_ENABLE;
+		if (chip->recharge_mv > 50)
+			reg |= AUTO_RECHG_TH_100MV;
+		else
+			reg |= AUTO_RECHG_TH_50MV;
+
+		rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+				AUTO_RECHG_BIT |
+				AUTO_RECHG_TH_BIT, reg);
+		if (rc) {
+			pr_err("Couldn't set rechg-cfg rc = %d\n", rc);
+			return rc;
+		}
+	} else if (chip->recharge_disabled) {
+		rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+				AUTO_RECHG_BIT,
+				AUTO_RECHG_DISABLE);
+		if (rc) {
+			pr_err("Couldn't disable auto-rechg rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	/* enable/disable charging by suspending usb */
+	rc = smb1351_usb_suspend(chip, USER, chip->usb_suspended_status);
+	if (rc) {
+		pr_err("Unable to %s battery charging. rc=%d\n",
+			chip->usb_suspended_status ? "disable" : "enable",
+									rc);
+	}
+
+	return rc;
+}
+
+static enum power_supply_property smb1351_battery_properties[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int smb1351_get_prop_batt_status(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg = 0;
+
+	if (chip->batt_full)
+		return POWER_SUPPLY_STATUS_FULL;
+
+	rc = smb1351_read_reg(chip, STATUS_4_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_4 rc = %d\n", rc);
+		return POWER_SUPPLY_STATUS_UNKNOWN;
+	}
+
+	pr_debug("STATUS_4_REG(0x3A)=%x\n", reg);
+
+	if (reg & STATUS_HOLD_OFF_BIT)
+		return POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+	if (reg & STATUS_CHG_MASK)
+		return POWER_SUPPLY_STATUS_CHARGING;
+
+	return POWER_SUPPLY_STATUS_DISCHARGING;
+}
+
+static int smb1351_get_prop_batt_present(struct smb1351_charger *chip)
+{
+	return !chip->battery_missing;
+}
+
+static int smb1351_get_prop_batt_capacity(struct smb1351_charger *chip)
+{
+	union power_supply_propval ret = {0, };
+
+	if (chip->fake_battery_soc >= 0)
+		return chip->fake_battery_soc;
+
+	if (chip->bms_psy) {
+		power_supply_get_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret);
+		return ret.intval;
+	}
+	pr_debug("return DEFAULT_BATT_CAPACITY\n");
+	return DEFAULT_BATT_CAPACITY;
+}
+
+static int smb1351_get_prop_batt_temp(struct smb1351_charger *chip)
+{
+	union power_supply_propval ret = {0, };
+	int rc = 0;
+	struct qpnp_vadc_result results;
+
+	if (chip->bms_psy) {
+		power_supply_get_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_TEMP, &ret);
+		return ret.intval;
+	}
+	if (chip->vadc_dev) {
+		rc = qpnp_vadc_read(chip->vadc_dev,
+				LR_MUX1_BATT_THERM, &results);
+		if (rc)
+			pr_debug("Unable to read adc batt temp rc=%d\n", rc);
+		else
+			return (int)results.physical;
+	}
+
+	pr_debug("return default temperature\n");
+	return DEFAULT_BATT_TEMP;
+}
+
+static int smb1351_get_prop_charge_type(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg = 0;
+
+	rc = smb1351_read_reg(chip, STATUS_4_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_4 rc = %d\n", rc);
+		return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+	}
+
+	pr_debug("STATUS_4_REG(0x3A)=%x\n", reg);
+
+	reg &= STATUS_CHG_MASK;
+
+	if (reg == STATUS_FAST_CHARGING)
+		return POWER_SUPPLY_CHARGE_TYPE_FAST;
+	else if (reg == STATUS_TAPER_CHARGING)
+		return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+	else if (reg == STATUS_PRE_CHARGING)
+		return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+	else
+		return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int smb1351_get_prop_batt_health(struct smb1351_charger *chip)
+{
+	union power_supply_propval ret = {0, };
+
+	if (chip->batt_hot)
+		ret.intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (chip->batt_cold)
+		ret.intval = POWER_SUPPLY_HEALTH_COLD;
+	else if (chip->batt_warm)
+		ret.intval = POWER_SUPPLY_HEALTH_WARM;
+	else if (chip->batt_cool)
+		ret.intval = POWER_SUPPLY_HEALTH_COOL;
+	else
+		ret.intval = POWER_SUPPLY_HEALTH_GOOD;
+
+	return ret.intval;
+}
+
+static int smb1351_set_usb_chg_current(struct smb1351_charger *chip,
+							int current_ma)
+{
+	int i, rc = 0;
+	u8 reg = 0, mask = 0;
+
+	pr_debug("USB current_ma = %d\n", current_ma);
+
+	if (chip->chg_autonomous_mode) {
+		pr_debug("Charger in autonomous mode\n");
+		return 0;
+	}
+
+	/* set suspend bit when current_ma <= 2 */
+	if (current_ma <= SUSPEND_CURRENT_MA) {
+		smb1351_usb_suspend(chip, CURRENT, true);
+		pr_debug("USB suspend\n");
+		return 0;
+	}
+
+	if (current_ma > SUSPEND_CURRENT_MA &&
+			current_ma < USB2_MIN_CURRENT_MA)
+		current_ma = USB2_MIN_CURRENT_MA;
+
+	if (current_ma == USB2_MIN_CURRENT_MA) {
+		/* USB 2.0 - 100mA */
+		reg = CMD_USB_2_MODE | CMD_USB_100_MODE;
+	} else if (current_ma == USB3_MIN_CURRENT_MA) {
+		/* USB 3.0 - 150mA */
+		reg = CMD_USB_3_MODE | CMD_USB_100_MODE;
+	} else if (current_ma == USB2_MAX_CURRENT_MA) {
+		/* USB 2.0 - 500mA */
+		reg = CMD_USB_2_MODE | CMD_USB_500_MODE;
+	} else if (current_ma == USB3_MAX_CURRENT_MA) {
+		/* USB 3.0 - 900mA */
+		reg = CMD_USB_3_MODE | CMD_USB_500_MODE;
+	} else if (current_ma > USB2_MAX_CURRENT_MA) {
+		/* HC mode - if none of the above */
+		reg = CMD_USB_AC_MODE;
+
+		for (i = ARRAY_SIZE(usb_chg_current) - 1; i >= 0; i--) {
+			if (usb_chg_current[i] <= current_ma)
+				break;
+		}
+		if (i < 0)
+			i = 0;
+		rc = smb1351_masked_write(chip, CHG_CURRENT_CTRL_REG,
+						AC_INPUT_CURRENT_LIMIT_MASK, i);
+		if (rc) {
+			pr_err("Couldn't set input mA rc=%d\n", rc);
+			return rc;
+		}
+	}
+	/* control input current mode by command */
+	reg |= CMD_INPUT_CURRENT_MODE_CMD;
+	mask = CMD_INPUT_CURRENT_MODE_BIT | CMD_USB_2_3_SEL_BIT |
+		CMD_USB_1_5_AC_CTRL_MASK;
+	rc = smb1351_masked_write(chip, CMD_INPUT_LIMIT_REG, mask, reg);
+	if (rc) {
+		pr_err("Couldn't set charging mode rc = %d\n", rc);
+		return rc;
+	}
+
+	/* unset the suspend bit here */
+	smb1351_usb_suspend(chip, CURRENT, false);
+
+	return rc;
+}
+
+static int smb1351_batt_property_is_writeable(struct power_supply *psy,
+					enum power_supply_property psp)
+{
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+	case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+	case POWER_SUPPLY_PROP_CAPACITY:
+		return 1;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int smb1351_battery_set_property(struct power_supply *psy,
+					enum power_supply_property prop,
+					const union power_supply_propval *val)
+{
+	int rc;
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		if (!chip->bms_controlled_charging)
+			return -EINVAL;
+		switch (val->intval) {
+		case POWER_SUPPLY_STATUS_FULL:
+			rc = smb1351_battchg_disable(chip, SOC, true);
+			if (rc) {
+				pr_err("Couldn't disable charging rc = %d\n",
+									rc);
+			} else {
+				chip->batt_full = true;
+				pr_debug("status = FULL, batt_full = %d\n",
+							chip->batt_full);
+			}
+			break;
+		case POWER_SUPPLY_STATUS_DISCHARGING:
+			chip->batt_full = false;
+			power_supply_changed(chip->batt_psy);
+			pr_debug("status = DISCHARGING, batt_full = %d\n",
+							chip->batt_full);
+			break;
+		case POWER_SUPPLY_STATUS_CHARGING:
+			rc = smb1351_battchg_disable(chip, SOC, false);
+			if (rc) {
+				pr_err("Couldn't enable charging rc = %d\n",
+									rc);
+			} else {
+				chip->batt_full = false;
+				pr_debug("status = CHARGING, batt_full = %d\n",
+							chip->batt_full);
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		smb1351_usb_suspend(chip, USER, !val->intval);
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+		smb1351_battchg_disable(chip, USER, !val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		chip->fake_battery_soc = val->intval;
+		power_supply_changed(chip->batt_psy);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smb1351_battery_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = smb1351_get_prop_batt_status(chip);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = smb1351_get_prop_batt_present(chip);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = smb1351_get_prop_batt_capacity(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		val->intval = !chip->usb_suspended_status;
+		break;
+	case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+		val->intval = !chip->battchg_disabled_status;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		val->intval = smb1351_get_prop_charge_type(chip);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = smb1351_get_prop_batt_health(chip);
+		break;
+	case POWER_SUPPLY_PROP_TEMP:
+		val->intval = smb1351_get_prop_batt_temp(chip);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = "smb1351";
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static enum power_supply_property smb1351_parallel_properties[] = {
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+};
+
+static int smb1351_parallel_set_chg_suspend(struct smb1351_charger *chip,
+						int suspend)
+{
+	int rc;
+	u8 reg, mask = 0;
+
+	if (chip->parallel_charger_suspended == suspend) {
+		pr_debug("Skip same state request suspended = %d suspend=%d\n",
+				chip->parallel_charger_suspended, suspend);
+		return 0;
+	}
+
+	if (!suspend) {
+		rc = smb_chip_get_version(chip);
+		if (rc) {
+			pr_err("Couldn't get version rc = %d\n", rc);
+			return rc;
+		}
+
+		rc = smb1351_enable_volatile_writes(chip);
+		if (rc) {
+			pr_err("Couldn't configure for volatile rc = %d\n", rc);
+			return rc;
+		}
+
+		/* set the float voltage */
+		if (chip->vfloat_mv != -EINVAL) {
+			rc = smb1351_float_voltage_set(chip, chip->vfloat_mv);
+			if (rc) {
+				pr_err("Couldn't set float voltage rc = %d\n",
+									rc);
+				return rc;
+			}
+		}
+
+		/* set recharge-threshold and enable auto recharge */
+		if (chip->recharge_mv != -EINVAL) {
+			reg = AUTO_RECHG_ENABLE;
+			if (chip->recharge_mv > 50)
+				reg |= AUTO_RECHG_TH_100MV;
+			else
+				reg |= AUTO_RECHG_TH_50MV;
+
+			rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+					AUTO_RECHG_BIT |
+					AUTO_RECHG_TH_BIT, reg);
+			if (rc) {
+				pr_err("Couldn't set rechg-cfg rc = %d\n", rc);
+				return rc;
+			}
+		}
+
+		/* control USB suspend via command bits */
+		rc = smb1351_masked_write(chip, VARIOUS_FUNC_REG,
+					APSD_EN_BIT | SUSPEND_MODE_CTRL_BIT,
+						SUSPEND_MODE_CTRL_BY_I2C);
+		if (rc) {
+			pr_err("Couldn't set USB suspend rc=%d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * While the parallel charger is being enabled, force USB
+		 * suspend; charging starts only once
+		 * POWER_SUPPLY_PROP_CURRENT_MAX is set.
+		 */
+		rc = smb1351_usb_suspend(chip, CURRENT, true);
+		if (rc) {
+			pr_err("failed to suspend rc=%d\n", rc);
+			return rc;
+		}
+		chip->usb_psy_ma = SUSPEND_CURRENT_MA;
+
+		/* control charge enable via pin with the configured polarity */
+		reg = chip->parallel_pin_polarity_setting | USBCS_CTRL_BY_I2C;
+		rc = smb1351_masked_write(chip, CHG_PIN_EN_CTRL_REG,
+					EN_PIN_CTRL_MASK | USBCS_CTRL_BIT, reg);
+		if (rc) {
+			pr_err("Couldn't set en pin rc=%d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * setup USB 2.0/3.0 detection and USB 500/100
+		 * command polarity
+		 */
+		reg = USB_2_3_MODE_SEL_BY_I2C | USB_CMD_POLARITY_500_1_100_0;
+		mask = USB_2_3_MODE_SEL_BIT | USB_5_1_CMD_POLARITY_BIT;
+		rc = smb1351_masked_write(chip,
+				CHG_OTH_CURRENT_CTRL_REG, mask, reg);
+		if (rc) {
+			pr_err("Couldn't set CHG_OTH_CURRENT_CTRL_REG rc=%d\n",
+					rc);
+			return rc;
+		}
+
+		rc = smb1351_fastchg_current_set(chip,
+					chip->target_fastchg_current_max_ma);
+		if (rc) {
+			pr_err("Couldn't set fastchg current rc=%d\n", rc);
+			return rc;
+		}
+		chip->parallel_charger_suspended = false;
+	} else {
+		rc = smb1351_usb_suspend(chip, CURRENT, true);
+		if (rc)
+			pr_debug("failed to suspend rc=%d\n", rc);
+
+		chip->usb_psy_ma = SUSPEND_CURRENT_MA;
+		chip->parallel_charger_suspended = true;
+	}
+
+	return 0;
+}
+
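+/*
+ * Map a requested input current (in mA) to the closest entry in
+ * usb_chg_current[] and return its index.
+ */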
+static int smb1351_get_closest_usb_setpoint(int val)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(usb_chg_current) - 1; i >= 0; i--) {
+		if (usb_chg_current[i] <= val)
+			break;
+	}
+	if (i < 0)
+		i = 0;
+
+	if (i >= ARRAY_SIZE(usb_chg_current) - 1)
+		return ARRAY_SIZE(usb_chg_current) - 1;
+
+	/* check what is closer, i or i + 1 */
+	if (abs(usb_chg_current[i] - val) < abs(usb_chg_current[i + 1] - val))
+		return i;
+	else
+		return i + 1;
+}
+
+static bool smb1351_is_input_current_limited(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb1351_read_reg(chip, IRQ_H_REG, &reg);
+	if (rc) {
+		pr_err("Failed to read IRQ_H_REG for ICL status: %d\n", rc);
+		return false;
+	}
+
+	return !!(reg & IRQ_IC_LIMIT_STATUS_BIT);
+}
+
+static bool smb1351_is_usb_present(struct smb1351_charger *chip)
+{
+	int rc;
+	union power_supply_propval val = {0, };
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+	if (!chip->usb_psy) {
+		pr_err("USB psy not found\n");
+		return false;
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_ONLINE, &val);
+	if (rc < 0) {
+		pr_err("Failed to get present property rc=%d\n", rc);
+		return false;
+	}
+
+	if (val.intval)
+		return true;
+
+	return false;
+}
+
+static int smb1351_parallel_set_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       const union power_supply_propval *val)
+{
+	int rc = 0, index;
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		/*
+		 * CHG_EN is controlled by a pin in parallel charging mode;
+		 * use USB suspend to disable charging by command.
+		 */
+		if (!chip->parallel_charger_suspended)
+			rc = smb1351_usb_suspend(chip, USER, !val->intval);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb1351_parallel_set_chg_suspend(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		chip->target_fastchg_current_max_ma =
+						val->intval / 1000;
+		if (!chip->parallel_charger_suspended)
+			rc = smb1351_fastchg_current_set(chip,
+					chip->target_fastchg_current_max_ma);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		index = smb1351_get_closest_usb_setpoint(val->intval / 1000);
+		chip->usb_psy_ma = usb_chg_current[index];
+		if (!chip->parallel_charger_suspended)
+			rc = smb1351_set_usb_chg_current(chip,
+						chip->usb_psy_ma);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		chip->vfloat_mv = val->intval / 1000;
+		if (!chip->parallel_charger_suspended)
+			rc = smb1351_float_voltage_set(chip, val->intval);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return rc;
+}
+
+static int smb1351_parallel_is_writeable(struct power_supply *psy,
+				       enum power_supply_property prop)
+{
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static int smb1351_parallel_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		val->intval = !chip->usb_suspended_status;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if (!chip->parallel_charger_suspended)
+			val->intval = chip->usb_psy_ma * 1000;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		if (!chip->parallel_charger_suspended)
+			val->intval = chip->vfloat_mv;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+		/* Check if SMB1351 is present */
+		if (smb1351_is_usb_present(chip)) {
+			val->intval = smb1351_get_prop_charge_type(chip);
+			if (val->intval == POWER_SUPPLY_CHARGE_TYPE_UNKNOWN) {
+				pr_debug("Failed to get charge type, charger may be absent\n");
+				return -ENODEV;
+			}
+		}
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		if (!chip->parallel_charger_suspended)
+			val->intval = chip->fastchg_current_max_ma * 1000;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_STATUS:
+		if (!chip->parallel_charger_suspended)
+			val->intval = smb1351_get_prop_batt_status(chip);
+		else
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		if (!chip->parallel_charger_suspended)
+			val->intval =
+				smb1351_is_input_current_limited(chip) ? 1 : 0;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_MODE:
+		val->intval = chip->parallel_mode;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void smb1351_chg_set_appropriate_battery_current(
+				struct smb1351_charger *chip)
+{
+	int rc;
+	unsigned int current_max = chip->target_fastchg_current_max_ma;
+
+	if (chip->batt_cool)
+		current_max = min(current_max, chip->batt_cool_ma);
+	if (chip->batt_warm)
+		current_max = min(current_max, chip->batt_warm_ma);
+
+	pr_debug("setting %dmA\n", current_max);
+
+	rc = smb1351_fastchg_current_set(chip, current_max);
+	if (rc)
+		pr_err("Couldn't set charging current rc = %d\n", rc);
+}
+
+static void smb1351_chg_set_appropriate_vddmax(struct smb1351_charger *chip)
+{
+	int rc;
+	unsigned int vddmax = chip->vfloat_mv;
+
+	if (chip->batt_cool)
+		vddmax = min(vddmax, chip->batt_cool_mv);
+	if (chip->batt_warm)
+		vddmax = min(vddmax, chip->batt_warm_mv);
+
+	pr_debug("setting %dmV\n", vddmax);
+
+	rc = smb1351_float_voltage_set(chip, vddmax);
+	if (rc)
+		pr_err("Couldn't set float voltage rc = %d\n", rc);
+}
+
+static void smb1351_chg_ctrl_in_jeita(struct smb1351_charger *chip)
+{
+	union power_supply_propval ret = {0, };
+	int rc;
+
+	/* enable the iterm to prevent the reverse boost */
+	if (chip->iterm_disabled) {
+		if (chip->batt_cool || chip->batt_warm) {
+			rc = smb1351_iterm_set(chip, 100);
+			pr_debug("set the iterm due to JEITA\n");
+		} else {
+			rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+						ITERM_EN_BIT, ITERM_DISABLE);
+			pr_debug("disable the iterm when exiting warm/cool\n");
+		}
+		if (rc) {
+			pr_err("Couldn't set iterm rc = %d\n", rc);
+			return;
+		}
+	}
+	/*
+	 * When JEITA returns to normal, charging may be disabled due to
+	 * current termination, so re-enable charging if the SoC is less
+	 * than 100 in normal mode. A 200 ms delay is required between
+	 * the disable and enable operations.
+	 */
+	if (chip->bms_psy) {
+		rc = power_supply_get_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret);
+		if (rc) {
+			pr_err("Couldn't read the bms capacity rc = %d\n",
+									rc);
+			return;
+		}
+		if (!chip->batt_cool && !chip->batt_warm
+				&& !chip->batt_cold && !chip->batt_hot
+				&& ret.intval < 100) {
+			rc = smb1351_battchg_disable(chip, THERMAL, true);
+			if (rc) {
+				pr_err("Couldn't disable charging rc = %d\n",
+									rc);
+				return;
+			}
+			/* delay for resetting the charging */
+			msleep(200);
+			rc = smb1351_battchg_disable(chip, THERMAL, false);
+			if (rc) {
+				pr_err("Couldn't enable charging rc = %d\n",
+									rc);
+				return;
+			}
+
+			chip->batt_full = false;
+			pr_debug("re-enable charging, batt_full = %d\n",
+						chip->batt_full);
+			power_supply_changed(chip->batt_psy);
+		}
+	}
+}
+
+#define HYSTERESIS_DECIDEGC 20
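+/* 2 degC (20 deci-degC) hysteresis when re-arming ADC_TM thresholds */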
+static void smb1351_chg_adc_notification(enum qpnp_tm_state state, void *ctx)
+{
+	struct smb1351_charger *chip = ctx;
+	struct battery_status *cur = NULL;
+	int temp;
+
+	if (state >= ADC_TM_STATE_NUM) {
+		pr_err("invalid state parameter %d\n", state);
+		return;
+	}
+
+	temp = smb1351_get_prop_batt_temp(chip);
+
+	pr_debug("temp = %d state = %s\n", temp,
+				state == ADC_TM_WARM_STATE ? "hot" : "cold");
+
+	/* reset the adc status request */
+	chip->adc_param.state_request = ADC_TM_WARM_COOL_THR_ENABLE;
+
+	/* temp from low to high */
+	if (state == ADC_TM_WARM_STATE) {
+		/* WARM -> HOT */
+		if (temp >= chip->batt_hot_decidegc) {
+			cur = &batt_s[BATT_HOT];
+			chip->adc_param.low_temp =
+				chip->batt_hot_decidegc - HYSTERESIS_DECIDEGC;
+			chip->adc_param.state_request =	ADC_TM_COOL_THR_ENABLE;
+		/* NORMAL -> WARM */
+		} else if (temp >= chip->batt_warm_decidegc &&
+					chip->jeita_supported) {
+			cur = &batt_s[BATT_WARM];
+			chip->adc_param.low_temp =
+				chip->batt_warm_decidegc - HYSTERESIS_DECIDEGC;
+			chip->adc_param.high_temp = chip->batt_hot_decidegc;
+		/* COOL -> NORMAL */
+		} else if (temp >= chip->batt_cool_decidegc &&
+					chip->jeita_supported) {
+			cur = &batt_s[BATT_NORMAL];
+			chip->adc_param.low_temp =
+				chip->batt_cool_decidegc - HYSTERESIS_DECIDEGC;
+			chip->adc_param.high_temp = chip->batt_warm_decidegc;
+		/* COLD -> COOL */
+		} else if (temp >= chip->batt_cold_decidegc) {
+			cur = &batt_s[BATT_COOL];
+			chip->adc_param.low_temp =
+				chip->batt_cold_decidegc - HYSTERESIS_DECIDEGC;
+			if (chip->jeita_supported)
+				chip->adc_param.high_temp =
+						chip->batt_cool_decidegc;
+			else
+				chip->adc_param.high_temp =
+						chip->batt_hot_decidegc;
+		/* MISSING -> COLD */
+		} else if (temp >= chip->batt_missing_decidegc) {
+			cur = &batt_s[BATT_COLD];
+			chip->adc_param.high_temp = chip->batt_cold_decidegc;
+			chip->adc_param.low_temp = chip->batt_missing_decidegc
+							- HYSTERESIS_DECIDEGC;
+		}
+	/* temp from high to low */
+	} else {
+		/* COLD -> MISSING */
+		if (temp <= chip->batt_missing_decidegc) {
+			cur = &batt_s[BATT_MISSING];
+			chip->adc_param.high_temp = chip->batt_missing_decidegc
+							+ HYSTERESIS_DECIDEGC;
+			chip->adc_param.state_request = ADC_TM_WARM_THR_ENABLE;
+		/* COOL -> COLD */
+		} else if (temp <= chip->batt_cold_decidegc) {
+			cur = &batt_s[BATT_COLD];
+			chip->adc_param.high_temp =
+				chip->batt_cold_decidegc + HYSTERESIS_DECIDEGC;
+			/* add low_temp to enable batt present check */
+			chip->adc_param.low_temp = chip->batt_missing_decidegc;
+		/* NORMAL -> COOL */
+		} else if (temp <= chip->batt_cool_decidegc &&
+					chip->jeita_supported) {
+			cur = &batt_s[BATT_COOL];
+			chip->adc_param.high_temp =
+				chip->batt_cool_decidegc + HYSTERESIS_DECIDEGC;
+			chip->adc_param.low_temp = chip->batt_cold_decidegc;
+		/* WARM -> NORMAL */
+		} else if (temp <= chip->batt_warm_decidegc &&
+					chip->jeita_supported) {
+			cur = &batt_s[BATT_NORMAL];
+			chip->adc_param.high_temp =
+				chip->batt_warm_decidegc + HYSTERESIS_DECIDEGC;
+			chip->adc_param.low_temp = chip->batt_cool_decidegc;
+		/* HOT -> WARM */
+		} else if (temp <= chip->batt_hot_decidegc) {
+			cur = &batt_s[BATT_WARM];
+			if (chip->jeita_supported)
+				chip->adc_param.low_temp =
+					chip->batt_warm_decidegc;
+			else
+				chip->adc_param.low_temp =
+					chip->batt_cold_decidegc;
+			chip->adc_param.high_temp =
+				chip->batt_hot_decidegc + HYSTERESIS_DECIDEGC;
+		}
+	}
+
+	if (!cur) {
+		pr_debug("Couldn't choose batt state, adc state=%d and temp=%d\n",
+			state, temp);
+		return;
+	}
+
+	if (cur->batt_present)
+		chip->battery_missing = false;
+	else
+		chip->battery_missing = true;
+
+	if (cur->batt_hot ^ chip->batt_hot ||
+			cur->batt_cold ^ chip->batt_cold) {
+		chip->batt_hot = cur->batt_hot;
+		chip->batt_cold = cur->batt_cold;
+		/* stop charging explicitly since we use PMIC thermal pin */
+		if (cur->batt_hot || cur->batt_cold ||
+							chip->battery_missing)
+			smb1351_battchg_disable(chip, THERMAL, 1);
+		else
+			smb1351_battchg_disable(chip, THERMAL, 0);
+	}
+
+	if ((chip->batt_warm ^ cur->batt_warm ||
+				chip->batt_cool ^ cur->batt_cool)
+						&& chip->jeita_supported) {
+		chip->batt_warm = cur->batt_warm;
+		chip->batt_cool = cur->batt_cool;
+		smb1351_chg_set_appropriate_battery_current(chip);
+		smb1351_chg_set_appropriate_vddmax(chip);
+		smb1351_chg_ctrl_in_jeita(chip);
+	}
+
+	pr_debug("hot %d, cold %d, warm %d, cool %d, soft jeita supported %d, missing %d, low = %d deciDegC, high = %d deciDegC\n",
+		chip->batt_hot, chip->batt_cold, chip->batt_warm,
+		chip->batt_cool, chip->jeita_supported,
+		chip->battery_missing, chip->adc_param.low_temp,
+		chip->adc_param.high_temp);
+	if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param))
+		pr_err("request ADC error\n");
+}
+
+static int rerun_apsd(struct smb1351_charger *chip)
+{
+	int rc;
+
+	pr_debug("Rerunning APSD\nDisabling APSD\n");
+
+	rc = smb1351_masked_write(chip, CMD_HVDCP_REG, CMD_APSD_RE_RUN_BIT,
+						CMD_APSD_RE_RUN_BIT);
+	if (rc)
+		pr_err("Couldn't re-run APSD algo\n");
+
+	return 0;
+}
+
+static void smb1351_hvdcp_det_work(struct work_struct *work)
+{
+	int rc;
+	u8 reg;
+	union power_supply_propval pval = {0, };
+	struct smb1351_charger *chip = container_of(work,
+						struct smb1351_charger,
+						hvdcp_det_work.work);
+
+	rc = smb1351_read_reg(chip, STATUS_7_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_7_REG rc = %d\n", rc);
+		goto end;
+	}
+	pr_debug("STATUS_7_REG = 0x%02X\n", reg);
+
+	if (reg) {
+		pr_debug("HVDCP detected; notifying USB PSY\n");
+		pval.intval = POWER_SUPPLY_TYPE_USB_HVDCP;
+		power_supply_set_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_TYPE, &pval);
+	}
+end:
+	pm_relax(chip->dev);
+}
+
+#define HVDCP_NOTIFY_MS 2500
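+/* delay after DCP detection before checking HVDCP status in STATUS_7 */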
+static int smb1351_apsd_complete_handler(struct smb1351_charger *chip,
+						u8 status)
+{
+	int rc;
+	u8 reg = 0;
+	union power_supply_propval prop = {0, };
+	enum power_supply_type type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+	/*
+	 * If apsd is disabled, charger detection is done by
+	 * USB phy driver.
+	 */
+	if (chip->disable_apsd || chip->usbin_ov) {
+		pr_debug("APSD %s, status = %d\n",
+			chip->disable_apsd ? "disabled" : "enabled", !!status);
+		pr_debug("USBIN ov, status = %d\n", chip->usbin_ov);
+		return 0;
+	}
+
+	rc = smb1351_read_reg(chip, STATUS_5_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_5 rc = %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("STATUS_5_REG(0x3B)=%x\n", reg);
+
+	switch (reg) {
+	case STATUS_PORT_ACA_DOCK:
+	case STATUS_PORT_ACA_C:
+	case STATUS_PORT_ACA_B:
+	case STATUS_PORT_ACA_A:
+		type = POWER_SUPPLY_TYPE_USB_ACA;
+		break;
+	case STATUS_PORT_CDP:
+		type = POWER_SUPPLY_TYPE_USB_CDP;
+		break;
+	case STATUS_PORT_DCP:
+		type = POWER_SUPPLY_TYPE_USB_DCP;
+		break;
+	case STATUS_PORT_SDP:
+		type = POWER_SUPPLY_TYPE_USB;
+		break;
+	case STATUS_PORT_OTHER:
+		type = POWER_SUPPLY_TYPE_USB_DCP;
+		break;
+	default:
+		type = POWER_SUPPLY_TYPE_USB;
+		break;
+	}
+
+	if (status) {
+		chip->chg_present = true;
+		pr_debug("APSD complete. USB type detected=%d chg_present=%d\n",
+						type, chip->chg_present);
+		if (!chip->battery_missing && !chip->apsd_rerun) {
+			if (type == POWER_SUPPLY_TYPE_USB) {
+				pr_debug("Setting usb psy dp=f dm=f SDP and rerun\n");
+				prop.intval = POWER_SUPPLY_DP_DM_DPF_DMF;
+				power_supply_set_property(chip->usb_psy,
+						POWER_SUPPLY_PROP_DP_DM, &prop);
+				chip->apsd_rerun = true;
+				rerun_apsd(chip);
+				return 0;
+			}
+			pr_debug("Set usb psy dp=f dm=f DCP and no rerun\n");
+			prop.intval = POWER_SUPPLY_DP_DM_DPF_DMF;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_DP_DM, &prop);
+		}
+		/*
+		 * If the force-hvdcp-2p0 property is defined, force
+		 * HVDCP 2.0 mode here in the APSD handler.
+		 */
+		if (chip->force_hvdcp_2p0) {
+			pr_debug("Force set to HVDCP 2.0 mode\n");
+			smb1351_masked_write(chip, VARIOUS_FUNC_3_REG,
+						QC_2P1_AUTH_ALGO_BIT, 0);
+			smb1351_masked_write(chip, CMD_HVDCP_REG,
+						CMD_FORCE_HVDCP_2P0_BIT,
+						CMD_FORCE_HVDCP_2P0_BIT);
+			type = POWER_SUPPLY_TYPE_USB_HVDCP;
+		} else if (type == POWER_SUPPLY_TYPE_USB_DCP) {
+			pr_debug("schedule hvdcp detection worker\n");
+			pm_stay_awake(chip->dev);
+			schedule_delayed_work(&chip->hvdcp_det_work,
+					msecs_to_jiffies(HVDCP_NOTIFY_MS));
+		}
+
+		prop.intval = type;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &prop);
+		/*
+		 * SMB is now done sampling the D+/D- lines;
+		 * notify the USB driver.
+		 */
+		pr_debug("updating usb_psy present=%d\n", chip->chg_present);
+		prop.intval = chip->chg_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&prop);
+		chip->apsd_rerun = false;
+	} else if (!chip->apsd_rerun) {
+		/* Handle Charger removal */
+		prop.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &prop);
+
+		chip->chg_present = false;
+		prop.intval = chip->chg_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&prop);
+
+		pr_debug("Set usb psy dp=r dm=r\n");
+		prop.intval = POWER_SUPPLY_DP_DM_DPR_DMR;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_DP_DM, &prop);
+	}
+
+	return 0;
+}
+
+/*
+ * As the source detect interrupt is not triggered on the falling edge,
+ * schedule a work item to check the source detect status after the
+ * charger UV interrupt fires.
+ */
+#define FIRST_CHECK_DELAY	100
+#define SECOND_CHECK_DELAY	1000
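+/*
+ * The removal check runs FIRST_CHECK_DELAY ms after the UV interrupt; if
+ * source detect is still asserted it is re-checked once more after
+ * SECOND_CHECK_DELAY ms before concluding the charger is still present.
+ */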
+static void smb1351_chg_remove_work(struct work_struct *work)
+{
+	int rc;
+	u8 reg;
+	struct smb1351_charger *chip = container_of(work,
+				struct smb1351_charger, chg_remove_work.work);
+
+	rc = smb1351_read_reg(chip, IRQ_G_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_G_REG rc = %d\n", rc);
+		goto end;
+	}
+
+	if (!(reg & IRQ_SOURCE_DET_BIT)) {
+		pr_debug("chg removed\n");
+		smb1351_apsd_complete_handler(chip, 0);
+	} else if (!chip->chg_remove_work_scheduled) {
+		chip->chg_remove_work_scheduled = true;
+		goto reschedule;
+	} else {
+		pr_debug("charger is present\n");
+	}
+end:
+	chip->chg_remove_work_scheduled = false;
+	pm_relax(chip->dev);
+	return;
+
+reschedule:
+	pr_debug("reschedule after 1s\n");
+	schedule_delayed_work(&chip->chg_remove_work,
+				msecs_to_jiffies(SECOND_CHECK_DELAY));
+}
+
+static int smb1351_usbin_uv_handler(struct smb1351_charger *chip, u8 status)
+{
+	union power_supply_propval pval = {0, };
+
+	/* use this to detect USB insertion only if !apsd */
+	if (chip->disable_apsd) {
+		/*
+		 * If APSD is disabled, src det interrupt won't trigger.
+		 * Hence use usbin_uv for removal and insertion notification
+		 */
+		if (status == 0) {
+			chip->chg_present = true;
+			pr_debug("updating usb_psy present=%d\n",
+						chip->chg_present);
+			pval.intval = POWER_SUPPLY_TYPE_USB;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_TYPE, &pval);
+
+			pval.intval = chip->chg_present;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_PRESENT,
+					&pval);
+		} else {
+			chip->chg_present = false;
+
+			pval.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_TYPE, &pval);
+
+			pr_debug("updating usb_psy present=%d\n",
+							chip->chg_present);
+			pval.intval = chip->chg_present;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_PRESENT,
+					&pval);
+		}
+		return 0;
+	}
+
+	if (status) {
+		cancel_delayed_work_sync(&chip->hvdcp_det_work);
+		pm_relax(chip->dev);
+		pr_debug("schedule charger remove worker\n");
+		schedule_delayed_work(&chip->chg_remove_work,
+					msecs_to_jiffies(FIRST_CHECK_DELAY));
+		pm_stay_awake(chip->dev);
+	}
+
+	pr_debug("chip->chg_present = %d\n", chip->chg_present);
+
+	return 0;
+}
+
+static int smb1351_usbin_ov_handler(struct smb1351_charger *chip, u8 status)
+{
+	int rc;
+	u8 reg;
+	union power_supply_propval pval = {0, };
+
+	rc = smb1351_read_reg(chip, IRQ_E_REG, &reg);
+	if (rc)
+		pr_err("Couldn't read IRQ_E rc = %d\n", rc);
+
+	if (status != 0) {
+		chip->chg_present = false;
+		chip->usbin_ov = true;
+
+		pval.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &pval);
+
+		pval.intval = chip->chg_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&pval);
+	} else {
+		chip->usbin_ov = false;
+		if (reg & IRQ_USBIN_UV_BIT)
+			pr_debug("Charger unplugged from OV\n");
+		else
+			smb1351_apsd_complete_handler(chip, 1);
+	}
+
+	if (chip->usb_psy) {
+		pval.intval = status ? POWER_SUPPLY_HEALTH_OVERVOLTAGE
+					: POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_HEALTH, &pval);
+		pr_debug("chip ov status is %d\n", pval.intval);
+	}
+	pr_debug("chip->chg_present = %d\n", chip->chg_present);
+
+	return 0;
+}
+
+static int smb1351_fast_chg_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("enter\n");
+	return 0;
+}
+
+static int smb1351_chg_term_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("enter\n");
+	if (!chip->bms_controlled_charging)
+		chip->batt_full = !!status;
+	return 0;
+}
+
+static int smb1351_safety_timeout_handler(struct smb1351_charger *chip,
+						u8 status)
+{
+	pr_debug("safety_timeout triggered\n");
+	return 0;
+}
+
+static int smb1351_aicl_done_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("aicl_done triggered\n");
+	return 0;
+}
+
+static int smb1351_hot_hard_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("status = 0x%02x\n", status);
+	chip->batt_hot = !!status;
+	return 0;
+}
+static int smb1351_cold_hard_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("status = 0x%02x\n", status);
+	chip->batt_cold = !!status;
+	return 0;
+}
+static int smb1351_hot_soft_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("status = 0x%02x\n", status);
+	chip->batt_warm = !!status;
+	return 0;
+}
+static int smb1351_cold_soft_handler(struct smb1351_charger *chip, u8 status)
+{
+	pr_debug("status = 0x%02x\n", status);
+	chip->batt_cool = !!status;
+	return 0;
+}
+
+static int smb1351_battery_missing_handler(struct smb1351_charger *chip,
+						u8 status)
+{
+	chip->battery_missing = !!status;
+
+	return 0;
+}
+
+static struct irq_handler_info handlers[] = {
+	[0] = {
+		.stat_reg	= IRQ_A_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "cold_soft",
+				.smb_irq = smb1351_cold_soft_handler,
+			},
+			{	.name	 = "hot_soft",
+				.smb_irq = smb1351_hot_soft_handler,
+			},
+			{	.name	 = "cold_hard",
+				.smb_irq = smb1351_cold_hard_handler,
+			},
+			{	.name	 = "hot_hard",
+				.smb_irq = smb1351_hot_hard_handler,
+			},
+		},
+	},
+	[1] = {
+		.stat_reg	= IRQ_B_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "internal_temp_limit",
+			},
+			{	.name	 = "vbatt_low",
+			},
+			{	.name	 = "battery_missing",
+				.smb_irq = smb1351_battery_missing_handler,
+			},
+			{	.name	 = "batt_therm_removed",
+			},
+		},
+	},
+	[2] = {
+		.stat_reg	= IRQ_C_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "chg_term",
+				.smb_irq = smb1351_chg_term_handler,
+			},
+			{	.name	 = "taper",
+			},
+			{	.name	 = "recharge",
+			},
+			{	.name	 = "fast_chg",
+				.smb_irq = smb1351_fast_chg_handler,
+			},
+		},
+	},
+	[3] = {
+		.stat_reg	= IRQ_D_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "prechg_timeout",
+			},
+			{	.name	 = "safety_timeout",
+				.smb_irq = smb1351_safety_timeout_handler,
+			},
+			{	.name	 = "chg_error",
+			},
+			{	.name	 = "batt_ov",
+			},
+		},
+	},
+	[4] = {
+		.stat_reg	= IRQ_E_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "power_ok",
+			},
+			{	.name	 = "afvc",
+			},
+			{	.name	 = "usbin_uv",
+				.smb_irq = smb1351_usbin_uv_handler,
+			},
+			{	.name	 = "usbin_ov",
+				.smb_irq = smb1351_usbin_ov_handler,
+			},
+		},
+	},
+	[5] = {
+		.stat_reg	= IRQ_F_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "otg_oc_retry",
+			},
+			{	.name	 = "rid",
+			},
+			{	.name	 = "otg_fail",
+			},
+			{	.name	 = "otg_oc",
+			},
+		},
+	},
+	[6] = {
+		.stat_reg	= IRQ_G_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "chg_inhibit",
+			},
+			{	.name	 = "aicl_fail",
+			},
+			{	.name	 = "aicl_done",
+				.smb_irq = smb1351_aicl_done_handler,
+			},
+			{	.name	 = "apsd_complete",
+				.smb_irq = smb1351_apsd_complete_handler,
+			},
+		},
+	},
+	[7] = {
+		.stat_reg	= IRQ_H_REG,
+		.val		= 0,
+		.prev_val	= 0,
+		.irq_info	= {
+			{	.name	 = "wdog_timeout",
+			},
+			{	.name	 = "hvdcp_auth_done",
+			},
+		},
+	},
+};
+
+#define IRQ_LATCHED_MASK	0x02
+#define IRQ_STATUS_MASK		0x01
+#define BITS_PER_IRQ		2
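+/*
+ * Illustrative decoding (matches the loop below): each status register
+ * packs four IRQs at two bits per IRQ; for irq_info[j], bit (2*j) is the
+ * real-time status and bit (2*j + 1) is the latched "triggered" flag.
+ * E.g. hot_hard is irq_info[3] of IRQ_A_REG, so its status is bit 6 and
+ * its latched flag is bit 7.
+ */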
+static irqreturn_t smb1351_chg_stat_handler(int irq, void *dev_id)
+{
+	struct smb1351_charger *chip = dev_id;
+	int i, j;
+	u8 triggered;
+	u8 changed;
+	u8 rt_stat, prev_rt_stat;
+	int rc;
+	int handler_count = 0;
+
+	mutex_lock(&chip->irq_complete);
+
+	chip->irq_waiting = true;
+	if (!chip->resume_completed) {
+		pr_debug("IRQ triggered before device-resume\n");
+		disable_irq_nosync(irq);
+		mutex_unlock(&chip->irq_complete);
+		return IRQ_HANDLED;
+	}
+	chip->irq_waiting = false;
+
+	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+		rc = smb1351_read_reg(chip, handlers[i].stat_reg,
+						&handlers[i].val);
+		if (rc) {
+			pr_err("Couldn't read %d rc = %d\n",
+					handlers[i].stat_reg, rc);
+			continue;
+		}
+
+		for (j = 0; j < ARRAY_SIZE(handlers[i].irq_info); j++) {
+			triggered = handlers[i].val
+			       & (IRQ_LATCHED_MASK << (j * BITS_PER_IRQ));
+			rt_stat = handlers[i].val
+				& (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+			prev_rt_stat = handlers[i].prev_val
+				& (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+			changed = prev_rt_stat ^ rt_stat;
+
+			if (triggered || changed)
+				rt_stat ? handlers[i].irq_info[j].high++ :
+						handlers[i].irq_info[j].low++;
+
+			if ((triggered || changed)
+				&& handlers[i].irq_info[j].smb_irq != NULL) {
+				handler_count++;
+				rc = handlers[i].irq_info[j].smb_irq(chip,
+								rt_stat);
+				if (rc)
+					pr_err("Couldn't handle %d irq for reg 0x%02x rc = %d\n",
+						j, handlers[i].stat_reg, rc);
+			}
+		}
+		handlers[i].prev_val = handlers[i].val;
+	}
+
+	pr_debug("handler count = %d\n", handler_count);
+	if (handler_count) {
+		pr_debug("batt psy changed\n");
+		power_supply_changed(chip->batt_psy);
+	}
+
+	mutex_unlock(&chip->irq_complete);
+
+	return IRQ_HANDLED;
+}
+
+static void smb1351_external_power_changed(struct power_supply *psy)
+{
+	struct smb1351_charger *chip = power_supply_get_drvdata(psy);
+	union power_supply_propval prop = {0,};
+	int rc, current_limit = 0, online = 0;
+
+	if (chip->bms_psy_name)
+		chip->bms_psy =
+			power_supply_get_by_name((char *)chip->bms_psy_name);
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_ONLINE, &prop);
+	if (rc)
+		pr_err("Couldn't read USB online property, rc=%d\n", rc);
+	else
+		online = prop.intval;
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+	if (rc)
+		pr_err("Couldn't read USB current_max property, rc=%d\n", rc);
+	else
+		current_limit = prop.intval / 1000;
+
+	pr_debug("online = %d, current_limit = %d\n", online, current_limit);
+
+	smb1351_enable_volatile_writes(chip);
+	smb1351_set_usb_chg_current(chip, current_limit);
+
+	pr_debug("updating batt psy\n");
+}
+
+#define LAST_CNFG_REG	0x16
+static int show_cnfg_regs(struct seq_file *m, void *data)
+{
+	struct smb1351_charger *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int cnfg_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb1351_charger *chip = inode->i_private;
+
+	return single_open(file, show_cnfg_regs, chip);
+}
+
+static const struct file_operations cnfg_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= cnfg_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#define FIRST_CMD_REG	0x30
+#define LAST_CMD_REG	0x34
+static int show_cmd_regs(struct seq_file *m, void *data)
+{
+	struct smb1351_charger *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int cmd_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb1351_charger *chip = inode->i_private;
+
+	return single_open(file, show_cmd_regs, chip);
+}
+
+static const struct file_operations cmd_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= cmd_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#define FIRST_STATUS_REG	0x36
+#define LAST_STATUS_REG		0x3F
+static int show_status_regs(struct seq_file *m, void *data)
+{
+	struct smb1351_charger *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int status_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb1351_charger *chip = inode->i_private;
+
+	return single_open(file, show_status_regs, chip);
+}
+
+static const struct file_operations status_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= status_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int show_irq_count(struct seq_file *m, void *data)
+{
+	int i, j, total = 0;
+
+	for (i = 0; i < ARRAY_SIZE(handlers); i++)
+		for (j = 0; j < 4; j++) {
+			seq_printf(m, "%s=%d\t(high=%d low=%d)\n",
+						handlers[i].irq_info[j].name,
+						handlers[i].irq_info[j].high
+						+ handlers[i].irq_info[j].low,
+						handlers[i].irq_info[j].high,
+						handlers[i].irq_info[j].low);
+			total += (handlers[i].irq_info[j].high
+					+ handlers[i].irq_info[j].low);
+		}
+
+	seq_printf(m, "\n\tTotal = %d\n", total);
+
+	return 0;
+}
+
+static int irq_count_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb1351_charger *chip = inode->i_private;
+
+	return single_open(file, show_irq_count, chip);
+}
+
+static const struct file_operations irq_count_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= irq_count_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int get_reg(void *data, u64 *val)
+{
+	struct smb1351_charger *chip = data;
+	int rc;
+	u8 temp;
+
+	rc = smb1351_read_reg(chip, chip->peek_poke_address, &temp);
+	if (rc) {
+		pr_err("Couldn't read reg %x rc = %d\n",
+			chip->peek_poke_address, rc);
+		return -EAGAIN;
+	}
+	*val = temp;
+	return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+	struct smb1351_charger *chip = data;
+	int rc;
+	u8 temp;
+
+	temp = (u8) val;
+	rc = smb1351_write_reg(chip, chip->peek_poke_address, temp);
+	if (rc) {
+		pr_err("Couldn't write 0x%02x to 0x%02x rc= %d\n",
+			temp, chip->peek_poke_address, rc);
+		return -EAGAIN;
+	}
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n");
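+/*
+ * Illustrative usage of the peek/poke attributes via the debugfs entries
+ * created in create_debugfs_entries() below (debugfs mounted at
+ * /sys/kernel/debug):
+ *   echo 0x06 > /sys/kernel/debug/smb1351/address
+ *   cat /sys/kernel/debug/smb1351/data
+ * reads register 0x06 through get_reg(); writing to "data" pokes it
+ * through set_reg().
+ */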
+
+static int force_irq_set(void *data, u64 val)
+{
+	struct smb1351_charger *chip = data;
+
+	smb1351_chg_stat_handler(chip->client->irq, data);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_irq_ops, NULL, force_irq_set, "0x%02llx\n");
+
+#ifdef DEBUG
+static void dump_regs(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (rc)
+			pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (rc)
+			pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+		rc = smb1351_read_reg(chip, addr, &reg);
+		if (rc)
+			pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+}
+#else
+static void dump_regs(struct smb1351_charger *chip)
+{
+}
+#endif
+
+static int smb1351_parse_dt(struct smb1351_charger *chip)
+{
+	int rc;
+	struct device_node *node = chip->dev->of_node;
+
+	if (!node) {
+		pr_err("device tree info. missing\n");
+		return -EINVAL;
+	}
+
+	chip->usb_suspended_status = of_property_read_bool(node,
+					"qcom,charging-disabled");
+
+	chip->chg_autonomous_mode = of_property_read_bool(node,
+					"qcom,chg-autonomous-mode");
+
+	chip->disable_apsd = of_property_read_bool(node, "qcom,disable-apsd");
+
+	chip->using_pmic_therm = of_property_read_bool(node,
+						"qcom,using-pmic-therm");
+	chip->bms_controlled_charging  = of_property_read_bool(node,
+					"qcom,bms-controlled-charging");
+	chip->force_hvdcp_2p0 = of_property_read_bool(node,
+					"qcom,force-hvdcp-2p0");
+
+	rc = of_property_read_string(node, "qcom,bms-psy-name",
+						&chip->bms_psy_name);
+	if (rc)
+		chip->bms_psy_name = NULL;
+
+	rc = of_property_read_u32(node, "qcom,fastchg-current-max-ma",
+					&chip->target_fastchg_current_max_ma);
+	if (rc)
+		chip->target_fastchg_current_max_ma = SMB1351_CHG_FAST_MAX_MA;
+
+	chip->iterm_disabled = of_property_read_bool(node,
+					"qcom,iterm-disabled");
+
+	rc = of_property_read_u32(node, "qcom,iterm-ma", &chip->iterm_ma);
+	if (rc)
+		chip->iterm_ma = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+						&chip->vfloat_mv);
+	if (rc)
+		chip->vfloat_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,recharge-mv",
+						&chip->recharge_mv);
+	if (rc)
+		chip->recharge_mv = -EINVAL;
+
+	chip->recharge_disabled = of_property_read_bool(node,
+					"qcom,recharge-disabled");
+
+	/* thermal and jeita support */
+	rc = of_property_read_u32(node, "qcom,batt-cold-decidegc",
+						&chip->batt_cold_decidegc);
+	if (rc < 0)
+		chip->batt_cold_decidegc = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,batt-hot-decidegc",
+						&chip->batt_hot_decidegc);
+	if (rc < 0)
+		chip->batt_hot_decidegc = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,batt-warm-decidegc",
+						&chip->batt_warm_decidegc);
+
+	rc |= of_property_read_u32(node, "qcom,batt-cool-decidegc",
+						&chip->batt_cool_decidegc);
+
+	if (!rc) {
+		rc = of_property_read_u32(node, "qcom,batt-cool-mv",
+						&chip->batt_cool_mv);
+
+		rc |= of_property_read_u32(node, "qcom,batt-warm-mv",
+						&chip->batt_warm_mv);
+
+		rc |= of_property_read_u32(node, "qcom,batt-cool-ma",
+						&chip->batt_cool_ma);
+
+		rc |= of_property_read_u32(node, "qcom,batt-warm-ma",
+						&chip->batt_warm_ma);
+		if (rc)
+			chip->jeita_supported = false;
+		else
+			chip->jeita_supported = true;
+	}
+
+	pr_debug("jeita_supported = %d\n", chip->jeita_supported);
+
+	rc = of_property_read_u32(node, "qcom,batt-missing-decidegc",
+						&chip->batt_missing_decidegc);
+
+	chip->pinctrl_state_name = of_get_property(node, "pinctrl-names", NULL);
+
+	return 0;
+}
+
+static int smb1351_determine_initial_state(struct smb1351_charger *chip)
+{
+	int rc;
+	u8 reg = 0;
+
+	/*
+	 * It is okay to read the interrupt status here since
+	 * interrupts aren't requested. Reading interrupt status
+	 * clears the interrupt so be careful to read interrupt
+	 * status only in interrupt handling code
+	 */
+
+	rc = smb1351_read_reg(chip, IRQ_B_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_B rc = %d\n", rc);
+		goto fail_init_status;
+	}
+
+	chip->battery_missing = (reg & IRQ_BATT_MISSING_BIT) ? true : false;
+
+	rc = smb1351_read_reg(chip, IRQ_C_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_C rc = %d\n", rc);
+		goto fail_init_status;
+	}
+	chip->batt_full = (reg & IRQ_TERM_BIT) ? true : false;
+
+	rc = smb1351_read_reg(chip, IRQ_A_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read irq A rc = %d\n", rc);
+		return rc;
+	}
+
+	if (reg & IRQ_HOT_HARD_BIT)
+		chip->batt_hot = true;
+	if (reg & IRQ_COLD_HARD_BIT)
+		chip->batt_cold = true;
+	if (reg & IRQ_HOT_SOFT_BIT)
+		chip->batt_warm = true;
+	if (reg & IRQ_COLD_SOFT_BIT)
+		chip->batt_cool = true;
+
+	rc = smb1351_read_reg(chip, IRQ_E_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_E rc = %d\n", rc);
+		goto fail_init_status;
+	}
+
+	if (reg & IRQ_USBIN_UV_BIT) {
+		smb1351_usbin_uv_handler(chip, 1);
+	} else {
+		smb1351_usbin_uv_handler(chip, 0);
+		smb1351_apsd_complete_handler(chip, 1);
+	}
+
+	rc = smb1351_read_reg(chip, IRQ_G_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read IRQ_G rc = %d\n", rc);
+		goto fail_init_status;
+	}
+
+	if (reg & IRQ_SOURCE_DET_BIT)
+		smb1351_apsd_complete_handler(chip, 1);
+
+	return 0;
+
+fail_init_status:
+	pr_err("Couldn't determine initial status\n");
+	return rc;
+}
+
+static int is_parallel_charger(struct i2c_client *client)
+{
+	struct device_node *node = client->dev.of_node;
+
+	return of_property_read_bool(node, "qcom,parallel-charger");
+}
+
+static int create_debugfs_entries(struct smb1351_charger *chip)
+{
+	struct dentry *ent;
+
+	chip->debug_root = debugfs_create_dir("smb1351", NULL);
+	if (!chip->debug_root) {
+		pr_err("Couldn't create debug dir\n");
+	} else {
+		ent = debugfs_create_file("config_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &cnfg_debugfs_ops);
+		if (!ent)
+			pr_err("Couldn't create cnfg debug file\n");
+
+		ent = debugfs_create_file("status_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &status_debugfs_ops);
+		if (!ent)
+			pr_err("Couldn't create status debug file\n");
+
+		ent = debugfs_create_file("cmd_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &cmd_debugfs_ops);
+		if (!ent)
+			pr_err("Couldn't create cmd debug file\n");
+
+		ent = debugfs_create_x32("address", S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->peek_poke_address));
+		if (!ent)
+			pr_err("Couldn't create address debug file\n");
+
+		ent = debugfs_create_file("data", S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &poke_poke_debug_ops);
+		if (!ent)
+			pr_err("Couldn't create data debug file\n");
+
+		ent = debugfs_create_file("force_irq",
+					  S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &force_irq_ops);
+		if (!ent)
+			pr_err("Couldn't create force_irq debug file\n");
+
+		ent = debugfs_create_file("irq_count", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &irq_count_debugfs_ops);
+		if (!ent)
+			pr_err("Couldn't create count debug file\n");
+	}
+	return 0;
+}
+
+static int smb1351_main_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc;
+	struct smb1351_charger *chip;
+	struct power_supply *usb_psy;
+	struct power_supply_config batt_psy_cfg = {};
+	u8 reg = 0;
+
+	usb_psy = power_supply_get_by_name("usb");
+	if (!usb_psy) {
+		pr_debug("USB psy not found; deferring probe\n");
+		return -EPROBE_DEFER;
+	}
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+	chip->usb_psy = usb_psy;
+	chip->fake_battery_soc = -EINVAL;
+	INIT_DELAYED_WORK(&chip->chg_remove_work, smb1351_chg_remove_work);
+	INIT_DELAYED_WORK(&chip->hvdcp_det_work, smb1351_hvdcp_det_work);
+	device_init_wakeup(chip->dev, true);
+
+	/* probe the device to check if it's actually connected */
+	rc = smb1351_read_reg(chip, CHG_REVISION_REG, &reg);
+	if (rc) {
+		pr_err("Failed to detect smb1351, device may be absent\n");
+		return -ENODEV;
+	}
+	pr_debug("smb1351 chip revision is %d\n", reg);
+
+	rc = smb1351_parse_dt(chip);
+	if (rc) {
+		pr_err("Couldn't parse DT nodes rc=%d\n", rc);
+		return rc;
+	}
+
+	/* using vadc and adc_tm for implementing pmic therm */
+	if (chip->using_pmic_therm) {
+		chip->vadc_dev = qpnp_get_vadc(chip->dev, "chg");
+		if (IS_ERR(chip->vadc_dev)) {
+			rc = PTR_ERR(chip->vadc_dev);
+			if (rc != -EPROBE_DEFER)
+				pr_err("vadc property missing\n");
+			return rc;
+		}
+		chip->adc_tm_dev = qpnp_get_adc_tm(chip->dev, "chg");
+		if (IS_ERR(chip->adc_tm_dev)) {
+			rc = PTR_ERR(chip->adc_tm_dev);
+			if (rc != -EPROBE_DEFER)
+				pr_err("adc_tm property missing\n");
+			return rc;
+		}
+	}
+
+	i2c_set_clientdata(client, chip);
+
+	chip->batt_psy_d.name		= "battery";
+	chip->batt_psy_d.type		= POWER_SUPPLY_TYPE_BATTERY;
+	chip->batt_psy_d.get_property	= smb1351_battery_get_property;
+	chip->batt_psy_d.set_property	= smb1351_battery_set_property;
+	chip->batt_psy_d.property_is_writeable =
+					smb1351_batt_property_is_writeable;
+	chip->batt_psy_d.properties	= smb1351_battery_properties;
+	chip->batt_psy_d.num_properties	=
+				ARRAY_SIZE(smb1351_battery_properties);
+	chip->batt_psy_d.external_power_changed =
+					smb1351_external_power_changed;
+
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
+	batt_psy_cfg.drv_data = chip;
+	batt_psy_cfg.supplied_to = pm_batt_supplied_to;
+	batt_psy_cfg.num_supplicants = ARRAY_SIZE(pm_batt_supplied_to);
+	chip->batt_psy = devm_power_supply_register(chip->dev,
+			&chip->batt_psy_d,
+			&batt_psy_cfg);
+	if (IS_ERR(chip->batt_psy)) {
+		pr_err("Couldn't register batt psy rc=%ld\n",
+				PTR_ERR(chip->batt_psy));
+		return PTR_ERR(chip->batt_psy);
+	}
+
+	dump_regs(chip);
+
+	rc = smb1351_regulator_init(chip);
+	if (rc) {
+		pr_err("Couldn't initialize smb1351 regulator rc=%d\n", rc);
+		goto fail_smb1351_regulator_init;
+	}
+
+	rc = smb1351_hw_init(chip);
+	if (rc) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto fail_smb1351_hw_init;
+	}
+
+	rc = smb1351_determine_initial_state(chip);
+	if (rc) {
+		pr_err("Couldn't determine initial state rc=%d\n", rc);
+		goto fail_smb1351_hw_init;
+	}
+
+	/* STAT irq configuration */
+	if (client->irq) {
+		rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+				smb1351_chg_stat_handler,
+				IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				"smb1351_chg_stat_irq", chip);
+		if (rc) {
+			pr_err("Failed STAT irq=%d request rc = %d\n",
+				client->irq, rc);
+			goto fail_smb1351_hw_init;
+		}
+		enable_irq_wake(client->irq);
+	}
+
+	if (chip->using_pmic_therm) {
+		if (!chip->jeita_supported) {
+			/* add hot/cold temperature monitor */
+			chip->adc_param.low_temp = chip->batt_cold_decidegc;
+			chip->adc_param.high_temp = chip->batt_hot_decidegc;
+		} else {
+			chip->adc_param.low_temp = chip->batt_cool_decidegc;
+			chip->adc_param.high_temp = chip->batt_warm_decidegc;
+		}
+		chip->adc_param.timer_interval = ADC_MEAS1_INTERVAL_500MS;
+		chip->adc_param.state_request = ADC_TM_WARM_COOL_THR_ENABLE;
+		chip->adc_param.btm_ctx = chip;
+		chip->adc_param.threshold_notification =
+				smb1351_chg_adc_notification;
+		chip->adc_param.channel = LR_MUX1_BATT_THERM;
+
+		rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+							&chip->adc_param);
+		if (rc) {
+			pr_err("requesting ADC error %d\n", rc);
+			goto fail_smb1351_hw_init;
+		}
+	}
+
+	create_debugfs_entries(chip);
+
+	dump_regs(chip);
+
+	pr_info("smb1351 successfully probed. charger=%d, batt=%d version=%s\n",
+			chip->chg_present,
+			smb1351_get_prop_batt_present(chip),
+			smb1351_version_str[chip->version]);
+	return 0;
+
+fail_smb1351_hw_init:
+	regulator_unregister(chip->otg_vreg.rdev);
+fail_smb1351_regulator_init:
+	return rc;
+}
+
+static int smb1351_parallel_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc;
+	struct smb1351_charger *chip;
+	struct device_node *node = client->dev.of_node;
+	struct power_supply_config parallel_psy_cfg = {};
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+	chip->parallel_charger = true;
+	chip->parallel_charger_suspended = true;
+
+	chip->usb_suspended_status = of_property_read_bool(node,
+					"qcom,charging-disabled");
+	rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+						&chip->vfloat_mv);
+	if (rc)
+		chip->vfloat_mv = -EINVAL;
+	rc = of_property_read_u32(node, "qcom,recharge-mv",
+						&chip->recharge_mv);
+	if (rc)
+		chip->recharge_mv = -EINVAL;
+
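+	/*
+	 * "qcom,parallel-en-pin-polarity": absent or 0 selects an active-low
+	 * enable pin, any non-zero value selects active-high.
+	 */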
+	rc = of_property_read_u32(node, "qcom,parallel-en-pin-polarity",
+					&chip->parallel_pin_polarity_setting);
+	if (rc)
+		chip->parallel_pin_polarity_setting = EN_BY_PIN_LOW_ENABLE;
+	else
+		chip->parallel_pin_polarity_setting =
+				chip->parallel_pin_polarity_setting ?
+				EN_BY_PIN_HIGH_ENABLE : EN_BY_PIN_LOW_ENABLE;
+
+	if (of_property_read_bool(node,
+				"qcom,parallel-external-current-sense"))
+		chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN_EXT;
+	else
+		chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN;
+
+	i2c_set_clientdata(client, chip);
+
+	chip->parallel_psy_d.name = "parallel";
+	chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_PARALLEL;
+	chip->parallel_psy_d.get_property = smb1351_parallel_get_property;
+	chip->parallel_psy_d.set_property = smb1351_parallel_set_property;
+	chip->parallel_psy_d.properties	= smb1351_parallel_properties;
+	chip->parallel_psy_d.property_is_writeable
+				= smb1351_parallel_is_writeable;
+	chip->parallel_psy_d.num_properties
+				= ARRAY_SIZE(smb1351_parallel_properties);
+
+	parallel_psy_cfg.drv_data = chip;
+	parallel_psy_cfg.num_supplicants = 0;
+	chip->parallel_psy = devm_power_supply_register(chip->dev,
+			&chip->parallel_psy_d,
+			&parallel_psy_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		pr_err("Couldn't register parallel psy rc=%ld\n",
+				PTR_ERR(chip->parallel_psy));
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
+	create_debugfs_entries(chip);
+
+	pr_info("smb1351 parallel successfully probed.\n");
+
+	return 0;
+}
+
+static int smb1351_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	if (is_parallel_charger(client))
+		return smb1351_parallel_charger_probe(client, id);
+	else
+		return smb1351_main_charger_probe(client, id);
+}
+
+static int smb1351_charger_remove(struct i2c_client *client)
+{
+	struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+	cancel_delayed_work_sync(&chip->chg_remove_work);
+
+	mutex_destroy(&chip->irq_complete);
+	debugfs_remove_recursive(chip->debug_root);
+	return 0;
+}
+
+static int smb1351_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = false;
+	mutex_unlock(&chip->irq_complete);
+
+	return 0;
+}
+
+static int smb1351_suspend_noirq(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	if (chip->irq_waiting) {
+		pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int smb1351_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = true;
+	if (chip->irq_waiting) {
+		mutex_unlock(&chip->irq_complete);
+		smb1351_chg_stat_handler(client->irq, chip);
+		enable_irq(client->irq);
+	} else {
+		mutex_unlock(&chip->irq_complete);
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops smb1351_pm_ops = {
+	.suspend	= smb1351_suspend,
+	.suspend_noirq	= smb1351_suspend_noirq,
+	.resume		= smb1351_resume,
+};
+
+static const struct of_device_id smb1351_match_table[] = {
+	{ .compatible = "qcom,smb1351-charger",},
+	{ },
+};
+
+static const struct i2c_device_id smb1351_charger_id[] = {
+	{"smb1351-charger", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, smb1351_charger_id);
+
+static struct i2c_driver smb1351_charger_driver = {
+	.driver		= {
+		.name		= "smb1351-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= smb1351_match_table,
+		.pm		= &smb1351_pm_ops,
+	},
+	.probe		= smb1351_charger_probe,
+	.remove		= smb1351_charger_remove,
+	.id_table	= smb1351_charger_id,
+};
+
+module_i2c_driver(smb1351_charger_driver);
+
+MODULE_DESCRIPTION("smb1351 Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:smb1351-charger");
diff --git a/drivers/power/supply/qcom/smb135x-charger.c b/drivers/power/supply/qcom/smb135x-charger.c
new file mode 100644
index 0000000..803dd6e
--- /dev/null
+++ b/drivers/power/supply/qcom/smb135x-charger.c
@@ -0,0 +1,4578 @@
+/* Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/i2c.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/pinctrl/consumer.h>
+
+#define SMB135X_BITS_PER_REG	8
+
+/* Mask/Bit helpers */
+#define _SMB135X_MASK(BITS, POS) \
+	((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB135X_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+		_SMB135X_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+				(RIGHT_BIT_POS))
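+/*
+ * E.g. SMB135X_MASK(5, 3) expands to ((1 << 3) - 1) << 3 = 0x38, the
+ * bit-5..3 field used by CHG_ITERM_MASK below.
+ */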
+
+/* Config registers */
+#define CFG_3_REG			0x03
+#define CHG_ITERM_50MA			0x08
+#define CHG_ITERM_100MA			0x10
+#define CHG_ITERM_150MA			0x18
+#define CHG_ITERM_200MA			0x20
+#define CHG_ITERM_250MA			0x28
+#define CHG_ITERM_300MA			0x00
+#define CHG_ITERM_500MA			0x30
+#define CHG_ITERM_600MA			0x38
+#define CHG_ITERM_MASK			SMB135X_MASK(5, 3)
+
+#define CFG_4_REG			0x04
+#define CHG_INHIBIT_MASK		SMB135X_MASK(7, 6)
+#define CHG_INHIBIT_50MV_VAL		0x00
+#define CHG_INHIBIT_100MV_VAL		0x40
+#define CHG_INHIBIT_200MV_VAL		0x80
+#define CHG_INHIBIT_300MV_VAL		0xC0
+
+#define CFG_5_REG			0x05
+#define RECHARGE_200MV_BIT		BIT(2)
+#define USB_2_3_BIT			BIT(5)
+
+#define CFG_A_REG			0x0A
+#define DCIN_INPUT_MASK			SMB135X_MASK(4, 0)
+
+#define CFG_C_REG			0x0C
+#define USBIN_INPUT_MASK		SMB135X_MASK(4, 0)
+#define USBIN_ADAPTER_ALLOWANCE_MASK	SMB135X_MASK(7, 5)
+#define ALLOW_5V_ONLY			0x00
+#define ALLOW_5V_OR_9V			0x20
+#define ALLOW_5V_TO_9V			0x40
+#define ALLOW_9V_ONLY			0x60
+
+#define CFG_D_REG			0x0D
+
+#define CFG_E_REG			0x0E
+#define POLARITY_100_500_BIT		BIT(2)
+#define USB_CTRL_BY_PIN_BIT		BIT(1)
+#define HVDCP_5_9_BIT			BIT(4)
+
+#define CFG_11_REG			0x11
+#define PRIORITY_BIT			BIT(7)
+#define AUTO_SRC_DET_EN_BIT			BIT(0)
+
+#define USBIN_DCIN_CFG_REG		0x12
+#define USBIN_SUSPEND_VIA_COMMAND_BIT	BIT(6)
+
+#define CFG_14_REG			0x14
+#define CHG_EN_BY_PIN_BIT		BIT(7)
+#define CHG_EN_ACTIVE_LOW_BIT		BIT(6)
+#define CHG_EN_ACTIVE_HIGH_BIT		0x0
+#define PRE_TO_FAST_REQ_CMD_BIT		BIT(5)
+#define DISABLE_CURRENT_TERM_BIT	BIT(3)
+#define DISABLE_AUTO_RECHARGE_BIT	BIT(2)
+#define EN_CHG_INHIBIT_BIT		BIT(0)
+
+#define CFG_16_REG			0x16
+#define SAFETY_TIME_EN_BIT		BIT(5)
+#define SAFETY_TIME_EN_SHIFT		5
+#define SAFETY_TIME_MINUTES_MASK	SMB135X_MASK(3, 2)
+#define SAFETY_TIME_MINUTES_SHIFT	2
+
+#define CFG_17_REG			0x17
+#define CHG_STAT_DISABLE_BIT		BIT(0)
+#define CHG_STAT_ACTIVE_HIGH_BIT	BIT(1)
+#define CHG_STAT_IRQ_ONLY_BIT		BIT(4)
+
+#define CFG_19_REG			0x19
+#define BATT_MISSING_ALGO_BIT		BIT(2)
+#define BATT_MISSING_THERM_BIT		BIT(1)
+
+#define CFG_1A_REG			0x1A
+#define HOT_SOFT_VFLOAT_COMP_EN_BIT	BIT(3)
+#define COLD_SOFT_VFLOAT_COMP_EN_BIT	BIT(2)
+#define HOT_SOFT_CURRENT_COMP_EN_BIT	BIT(1)
+#define COLD_SOFT_CURRENT_COMP_EN_BIT	BIT(0)
+
+#define CFG_1B_REG			0x1B
+#define COLD_HARD_MASK			SMB135X_MASK(7, 6)
+#define COLD_HARD_SHIFT			6
+#define HOT_HARD_MASK			SMB135X_MASK(5, 4)
+#define HOT_HARD_SHIFT			4
+#define COLD_SOFT_MASK			SMB135X_MASK(3, 2)
+#define COLD_SOFT_SHIFT			2
+#define HOT_SOFT_MASK			SMB135X_MASK(1, 0)
+#define HOT_SOFT_SHIFT			0
+
+#define VFLOAT_REG			0x1E
+
+#define VERSION1_REG			0x2A
+#define VERSION1_MASK			SMB135X_MASK(7, 6)
+#define VERSION1_SHIFT			6
+#define VERSION2_REG			0x32
+#define VERSION2_MASK			SMB135X_MASK(1, 0)
+#define VERSION3_REG			0x34
+
+/* Irq Config registers */
+#define IRQ_CFG_REG			0x07
+#define IRQ_BAT_HOT_COLD_HARD_BIT	BIT(7)
+#define IRQ_BAT_HOT_COLD_SOFT_BIT	BIT(6)
+#define IRQ_OTG_OVER_CURRENT_BIT	BIT(4)
+#define IRQ_USBIN_UV_BIT		BIT(2)
+#define IRQ_INTERNAL_TEMPERATURE_BIT	BIT(0)
+
+#define IRQ2_CFG_REG			0x08
+#define IRQ2_SAFETY_TIMER_BIT		BIT(7)
+#define IRQ2_CHG_ERR_BIT		BIT(6)
+#define IRQ2_CHG_PHASE_CHANGE_BIT	BIT(4)
+#define IRQ2_CHG_INHIBIT_BIT		BIT(3)
+#define IRQ2_POWER_OK_BIT		BIT(2)
+#define IRQ2_BATT_MISSING_BIT		BIT(1)
+#define IRQ2_VBAT_LOW_BIT		BIT(0)
+
+#define IRQ3_CFG_REG			0x09
+#define IRQ3_RID_DETECT_BIT		BIT(4)
+#define IRQ3_SRC_DETECT_BIT		BIT(2)
+#define IRQ3_DCIN_UV_BIT		BIT(0)
+
+#define USBIN_OTG_REG			0x0F
+#define OTG_CNFG_MASK			SMB135X_MASK(3, 2)
+#define OTG_CNFG_PIN_CTRL		0x04
+#define OTG_CNFG_COMMAND_CTRL		0x08
+#define OTG_CNFG_AUTO_CTRL		0x0C
+
+/* Command Registers */
+#define CMD_I2C_REG			0x40
+#define ALLOW_VOLATILE_BIT		BIT(6)
+
+#define CMD_INPUT_LIMIT			0x41
+#define USB_SHUTDOWN_BIT		BIT(6)
+#define DC_SHUTDOWN_BIT			BIT(5)
+#define USE_REGISTER_FOR_CURRENT	BIT(2)
+#define USB_100_500_AC_MASK		SMB135X_MASK(1, 0)
+#define USB_100_VAL			0x02
+#define USB_500_VAL			0x00
+#define USB_AC_VAL			0x01
+
+#define CMD_CHG_REG			0x42
+#define CMD_CHG_EN			BIT(1)
+#define OTG_EN				BIT(0)
+
+/* Status registers */
+#define STATUS_1_REG			0x47
+#define USING_USB_BIT			BIT(1)
+#define USING_DC_BIT			BIT(0)
+
+#define STATUS_2_REG			0x48
+#define HARD_LIMIT_STS_BIT		BIT(6)
+
+#define STATUS_4_REG			0x4A
+#define BATT_NET_CHG_CURRENT_BIT	BIT(7)
+#define BATT_LESS_THAN_2V		BIT(4)
+#define CHG_HOLD_OFF_BIT		BIT(3)
+#define CHG_TYPE_MASK			SMB135X_MASK(2, 1)
+#define CHG_TYPE_SHIFT			1
+#define BATT_NOT_CHG_VAL		0x0
+#define BATT_PRE_CHG_VAL		0x1
+#define BATT_FAST_CHG_VAL		0x2
+#define BATT_TAPER_CHG_VAL		0x3
+#define CHG_EN_BIT			BIT(0)
+
+#define STATUS_5_REG			0x4B
+#define CDP_BIT				BIT(7)
+#define DCP_BIT				BIT(6)
+#define OTHER_BIT			BIT(5)
+#define SDP_BIT				BIT(4)
+#define ACA_A_BIT			BIT(3)
+#define ACA_B_BIT			BIT(2)
+#define ACA_C_BIT			BIT(1)
+#define ACA_DOCK_BIT			BIT(0)
+
+#define STATUS_6_REG			0x4C
+#define RID_FLOAT_BIT			BIT(3)
+#define RID_A_BIT			BIT(2)
+#define RID_B_BIT			BIT(1)
+#define RID_C_BIT			BIT(0)
+
+#define STATUS_7_REG			0x4D
+
+#define STATUS_8_REG			0x4E
+#define USBIN_9V			BIT(5)
+#define USBIN_UNREG			BIT(4)
+#define USBIN_LV			BIT(3)
+#define DCIN_9V				BIT(2)
+#define DCIN_UNREG			BIT(1)
+#define DCIN_LV				BIT(0)
+
+#define STATUS_9_REG			0x4F
+#define REV_MASK			SMB135X_MASK(3, 0)
+
+/* Irq Status registers */
+#define IRQ_A_REG			0x50
+#define IRQ_A_HOT_HARD_BIT		BIT(6)
+#define IRQ_A_COLD_HARD_BIT		BIT(4)
+#define IRQ_A_HOT_SOFT_BIT		BIT(2)
+#define IRQ_A_COLD_SOFT_BIT		BIT(0)
+
+#define IRQ_B_REG			0x51
+#define IRQ_B_BATT_TERMINAL_BIT		BIT(6)
+#define IRQ_B_BATT_MISSING_BIT		BIT(4)
+#define IRQ_B_VBAT_LOW_BIT		BIT(2)
+#define IRQ_B_TEMPERATURE_BIT		BIT(0)
+
+#define IRQ_C_REG			0x52
+#define IRQ_C_TERM_BIT			BIT(0)
+#define IRQ_C_FASTCHG_BIT		BIT(6)
+
+#define IRQ_D_REG			0x53
+#define IRQ_D_TIMEOUT_BIT		BIT(2)
+
+#define IRQ_E_REG			0x54
+#define IRQ_E_DC_OV_BIT			BIT(6)
+#define IRQ_E_DC_UV_BIT			BIT(4)
+#define IRQ_E_USB_OV_BIT		BIT(2)
+#define IRQ_E_USB_UV_BIT		BIT(0)
+
+#define IRQ_F_REG			0x55
+#define IRQ_F_POWER_OK_BIT		BIT(0)
+
+#define IRQ_G_REG			0x56
+#define IRQ_G_SRC_DETECT_BIT		BIT(6)
+
+enum {
+	WRKARND_USB100_BIT = BIT(0),
+	WRKARND_APSD_FAIL = BIT(1),
+};
+
+enum {
+	REV_1 = 1,	/* Rev 1.0 */
+	REV_1_1 = 2,	/* Rev 1.1 */
+	REV_2 = 3,		/* Rev 2 */
+	REV_2_1 = 5,	/* Rev 2.1 */
+	REV_MAX,
+};
+
+static char *revision_str[] = {
+	[REV_1] = "rev1",
+	[REV_1_1] = "rev1.1",
+	[REV_2] = "rev2",
+	[REV_2_1] = "rev2.1",
+};
+
+enum {
+	V_SMB1356,
+	V_SMB1357,
+	V_SMB1358,
+	V_SMB1359,
+	V_MAX,
+};
+
+static int version_data[] = {
+	[V_SMB1356] = V_SMB1356,
+	[V_SMB1357] = V_SMB1357,
+	[V_SMB1358] = V_SMB1358,
+	[V_SMB1359] = V_SMB1359,
+};
+
+static char *version_str[] = {
+	[V_SMB1356] = "smb1356",
+	[V_SMB1357] = "smb1357",
+	[V_SMB1358] = "smb1358",
+	[V_SMB1359] = "smb1359",
+};
+
+enum {
+	USER = BIT(0),
+	THERMAL = BIT(1),
+	CURRENT = BIT(2),
+};
+
+enum path_type {
+	USB,
+	DC,
+};
+
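+/*
+ * Charge safety-timer durations in minutes; assumed to be indexed by the
+ * 2-bit SAFETY_TIME_MINUTES field of CFG_16.
+ */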
+static int chg_time[] = {
+	192,
+	384,
+	768,
+	1536,
+};
+
+static char *pm_batt_supplied_to[] = {
+	"bms",
+};
+
+struct smb135x_regulator {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+};
+
+struct smb135x_chg {
+	struct i2c_client		*client;
+	struct device			*dev;
+	struct mutex			read_write_lock;
+
+	u8				revision;
+	int				version;
+
+	bool				chg_enabled;
+	bool				chg_disabled_permanently;
+
+	bool				usb_present;
+	bool				dc_present;
+	bool				usb_slave_present;
+	bool				dc_ov;
+
+	bool				bmd_algo_disabled;
+	bool				iterm_disabled;
+	int				iterm_ma;
+	int				vfloat_mv;
+	int				safety_time;
+	int				resume_delta_mv;
+	int				fake_battery_soc;
+	struct dentry			*debug_root;
+	int				usb_current_arr_size;
+	int				*usb_current_table;
+	int				dc_current_arr_size;
+	int				*dc_current_table;
+	bool				inhibit_disabled;
+	int				fastchg_current_arr_size;
+	int				*fastchg_current_table;
+	int				fastchg_ma;
+	u8				irq_cfg_mask[3];
+	int				otg_oc_count;
+	struct delayed_work		reset_otg_oc_count_work;
+	struct mutex			otg_oc_count_lock;
+	struct delayed_work		hvdcp_det_work;
+
+	bool				parallel_charger;
+	bool				parallel_charger_present;
+	bool				bms_controlled_charging;
+	u32				parallel_pin_polarity_setting;
+
+	/* psy */
+	struct power_supply		*usb_psy;
+	int				usb_psy_ma;
+	int				real_usb_psy_ma;
+	struct power_supply_desc	batt_psy_d;
+	struct power_supply		*batt_psy;
+	struct power_supply_desc	dc_psy_d;
+	struct power_supply		*dc_psy;
+	struct power_supply_desc	parallel_psy_d;
+	struct power_supply		*parallel_psy;
+	struct power_supply		*bms_psy;
+	int				dc_psy_type;
+	int				dc_psy_ma;
+	const char			*bms_psy_name;
+
+	/* status tracking */
+	bool				chg_done_batt_full;
+	bool				batt_present;
+	bool				batt_hot;
+	bool				batt_cold;
+	bool				batt_warm;
+	bool				batt_cool;
+
+	bool				resume_completed;
+	bool				irq_waiting;
+	u32				usb_suspended;
+	u32				dc_suspended;
+	struct mutex			path_suspend_lock;
+
+	u32				peek_poke_address;
+	struct smb135x_regulator	otg_vreg;
+	int				skip_writes;
+	int				skip_reads;
+	u32				workaround_flags;
+	bool				soft_vfloat_comp_disabled;
+	bool				soft_current_comp_disabled;
+	struct mutex			irq_complete;
+	struct regulator		*therm_bias_vreg;
+	struct regulator		*usb_pullup_vreg;
+	struct delayed_work		wireless_insertion_work;
+
+	unsigned int			thermal_levels;
+	unsigned int			therm_lvl_sel;
+	unsigned int			*thermal_mitigation;
+	unsigned int			gamma_setting_num;
+	unsigned int			*gamma_setting;
+	struct mutex			current_change_lock;
+
+	const char			*pinctrl_state_name;
+	struct pinctrl			*smb_pinctrl;
+
+	bool				apsd_rerun;
+	bool				id_line_not_connected;
+};
+
+#define RETRY_COUNT 5
+static int retry_sleep_ms[RETRY_COUNT] = {
+	10, 20, 30, 40, 50
+};
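+/*
+ * With the table above a failing I2C access is retried up to five times
+ * with 10/20/30/40/50ms back-off (~150ms worst case) before giving up.
+ */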
+
+static int __smb135x_read(struct smb135x_chg *chip, int reg,
+				u8 *val)
+{
+	s32 ret;
+	int retry_count = 0;
+
+retry:
+	ret = i2c_smbus_read_byte_data(chip->client, reg);
+	if (ret < 0 && retry_count < RETRY_COUNT) {
+		/* sleep for a few ms before retrying */
+		msleep(retry_sleep_ms[retry_count++]);
+		goto retry;
+	}
+	if (ret < 0) {
+		dev_err(chip->dev,
+			"i2c read fail: can't read from %02x: %d\n", reg, ret);
+		return ret;
+	}
+	*val = ret;
+
+	return 0;
+}
+
+static int __smb135x_write(struct smb135x_chg *chip, int reg,
+						u8 val)
+{
+	s32 ret;
+	int retry_count = 0;
+
+retry:
+	ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+	if (ret < 0 && retry_count < RETRY_COUNT) {
+		/* sleep for a few ms before retrying */
+		msleep(retry_sleep_ms[retry_count++]);
+		goto retry;
+	}
+	if (ret < 0) {
+		dev_err(chip->dev,
+			"i2c write fail: can't write %02x to %02x: %d\n",
+			val, reg, ret);
+		return ret;
+	}
+	pr_debug("Writing 0x%02x=0x%02x\n", reg, val);
+	return 0;
+}
+
+static int smb135x_read(struct smb135x_chg *chip, int reg,
+				u8 *val)
+{
+	int rc;
+
+	if (chip->skip_reads) {
+		*val = 0;
+		return 0;
+	}
+	mutex_lock(&chip->read_write_lock);
+	pm_stay_awake(chip->dev);
+	rc = __smb135x_read(chip, reg, val);
+	pm_relax(chip->dev);
+	mutex_unlock(&chip->read_write_lock);
+
+	return rc;
+}
+
+static int smb135x_write(struct smb135x_chg *chip, int reg,
+						u8 val)
+{
+	int rc;
+
+	if (chip->skip_writes)
+		return 0;
+
+	mutex_lock(&chip->read_write_lock);
+	pm_stay_awake(chip->dev);
+	rc = __smb135x_write(chip, reg, val);
+	pm_relax(chip->dev);
+	mutex_unlock(&chip->read_write_lock);
+
+	return rc;
+}
+
+static int smb135x_masked_write(struct smb135x_chg *chip, int reg,
+						u8 mask, u8 val)
+{
+	s32 rc;
+	u8 temp;
+
+	if (chip->skip_writes || chip->skip_reads)
+		return 0;
+
+	mutex_lock(&chip->read_write_lock);
+	rc = __smb135x_read(chip, reg, &temp);
+	if (rc < 0) {
+		dev_err(chip->dev, "read failed: reg=%03X, rc=%d\n", reg, rc);
+		goto out;
+	}
+	temp &= ~mask;
+	temp |= val & mask;
+	rc = __smb135x_write(chip, reg, temp);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"write failed: reg=%03X, rc=%d\n", reg, rc);
+	}
+out:
+	mutex_unlock(&chip->read_write_lock);
+	return rc;
+}
+
+static int read_revision(struct smb135x_chg *chip, u8 *revision)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, STATUS_9_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read status 9 rc = %d\n", rc);
+		return rc;
+	}
+	*revision = (reg & REV_MASK);
+	return 0;
+}
+
+static int read_version1(struct smb135x_chg *chip, u8 *version)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, VERSION1_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read version 1 rc = %d\n", rc);
+		return rc;
+	}
+	*version = (reg & VERSION1_MASK) >> VERSION1_SHIFT;
+	return 0;
+}
+
+static int read_version2(struct smb135x_chg *chip, u8 *version)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, VERSION2_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read version 2 rc = %d\n", rc);
+		return rc;
+	}
+	*version = (reg & VERSION2_MASK);
+	return 0;
+}
+
+static int read_version3(struct smb135x_chg *chip, u8 *version)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, VERSION3_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read version 3 rc = %d\n", rc);
+		return rc;
+	}
+	*version = reg;
+	return 0;
+}
+
+#define TRIM_23_REG		0x23
+#define CHECK_USB100_GOOD_BIT	BIT(1)
+static bool is_usb100_broken(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, TRIM_23_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read TRIM_23 rc = %d\n", rc);
+		return false;
+	}
+	return !!(reg & CHECK_USB100_GOOD_BIT);
+}
+
+static bool is_usb_slave_present(struct smb135x_chg *chip)
+{
+	bool usb_slave_present;
+	u8 reg;
+	int rc;
+
+	if (chip->id_line_not_connected)
+		return false;
+
+	rc = smb135x_read(chip, STATUS_6_REG, &reg);
+	if (rc < 0) {
+		pr_err("Couldn't read stat 6 rc = %d\n", rc);
+		return false;
+	}
+
+	if ((reg & (RID_FLOAT_BIT | RID_A_BIT | RID_B_BIT | RID_C_BIT)) == 0)
+		usb_slave_present = 1;
+	else
+		usb_slave_present = 0;
+
+	pr_debug("stat6= 0x%02x slave_present = %d\n", reg, usb_slave_present);
+	return usb_slave_present;
+}
+
+static char *usb_type_str[] = {
+	"ACA_DOCK",	/* bit 0 */
+	"ACA_C",	/* bit 1 */
+	"ACA_B",	/* bit 2 */
+	"ACA_A",	/* bit 3 */
+	"SDP",		/* bit 4 */
+	"OTHER",	/* bit 5 */
+	"DCP",		/* bit 6 */
+	"CDP",		/* bit 7 */
+	"NONE",		/* bit 8  error case */
+};
+
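+/*
+ * STATUS_5 reports the APSD result as a single set bit; e.g. a DCP sets
+ * only bit 6, so find_first_bit() in the helpers below returns 6, which
+ * maps to "DCP" / POWER_SUPPLY_TYPE_USB_DCP.
+ */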
+/* helper to return the string of USB type */
+static char *get_usb_type_name(u8 stat_5)
+{
+	unsigned long stat = stat_5;
+
+	return usb_type_str[find_first_bit(&stat, SMB135X_BITS_PER_REG)];
+}
+
+static enum power_supply_type usb_type_enum[] = {
+	POWER_SUPPLY_TYPE_USB_ACA,	/* bit 0 */
+	POWER_SUPPLY_TYPE_USB_ACA,	/* bit 1 */
+	POWER_SUPPLY_TYPE_USB_ACA,	/* bit 2 */
+	POWER_SUPPLY_TYPE_USB_ACA,	/* bit 3 */
+	POWER_SUPPLY_TYPE_USB,		/* bit 4 */
+	POWER_SUPPLY_TYPE_UNKNOWN,	/* bit 5 */
+	POWER_SUPPLY_TYPE_USB_DCP,	/* bit 6 */
+	POWER_SUPPLY_TYPE_USB_CDP,	/* bit 7 */
+	POWER_SUPPLY_TYPE_UNKNOWN,	/* bit 8 error case, report UNKNOWN */
+};
+
+/* helper to return enum power_supply_type of USB type */
+static enum power_supply_type get_usb_supply_type(u8 stat_5)
+{
+	unsigned long stat = stat_5;
+
+	return usb_type_enum[find_first_bit(&stat, SMB135X_BITS_PER_REG)];
+}
+
+static enum power_supply_property smb135x_battery_properties[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+};
+
+static int smb135x_get_prop_batt_status(struct smb135x_chg *chip)
+{
+	int rc;
+	int status = POWER_SUPPLY_STATUS_DISCHARGING;
+	u8 reg = 0;
+	u8 chg_type;
+
+	if (chip->chg_done_batt_full)
+		return POWER_SUPPLY_STATUS_FULL;
+
+	rc = smb135x_read(chip, STATUS_4_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Unable to read STATUS_4_REG rc = %d\n", rc);
+		return POWER_SUPPLY_STATUS_UNKNOWN;
+	}
+
+	if (reg & CHG_HOLD_OFF_BIT) {
+		/*
+		 * when chg hold off happens the battery is
+		 * not charging
+		 */
+		status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+		goto out;
+	}
+
+	chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+
+	if (chg_type == BATT_NOT_CHG_VAL)
+		status = POWER_SUPPLY_STATUS_DISCHARGING;
+	else
+		status = POWER_SUPPLY_STATUS_CHARGING;
+out:
+	pr_debug("STATUS_4_REG=%x\n", reg);
+	return status;
+}
+
+static int smb135x_get_prop_batt_present(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, STATUS_4_REG, &reg);
+	if (rc < 0)
+		return 0;
+
+	/* treat battery gone if less than 2V */
+	if (reg & BATT_LESS_THAN_2V)
+		return 0;
+
+	return chip->batt_present;
+}
+
+static int smb135x_get_prop_charge_type(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+	u8 chg_type;
+
+	rc = smb135x_read(chip, STATUS_4_REG, &reg);
+	if (rc < 0)
+		return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+
+	chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+	if (chg_type == BATT_NOT_CHG_VAL)
+		return POWER_SUPPLY_CHARGE_TYPE_NONE;
+	else if (chg_type == BATT_FAST_CHG_VAL)
+		return POWER_SUPPLY_CHARGE_TYPE_FAST;
+	else if (chg_type == BATT_PRE_CHG_VAL)
+		return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+	else if (chg_type == BATT_TAPER_CHG_VAL)
+		return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+
+	return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+#define DEFAULT_BATT_CAPACITY	50
+static int smb135x_get_prop_batt_capacity(struct smb135x_chg *chip)
+{
+	union power_supply_propval ret = {0, };
+
+	if (chip->fake_battery_soc >= 0)
+		return chip->fake_battery_soc;
+	if (chip->bms_psy) {
+		power_supply_get_property(chip->bms_psy,
+				POWER_SUPPLY_PROP_CAPACITY, &ret);
+		return ret.intval;
+	}
+
+	return DEFAULT_BATT_CAPACITY;
+}
+
+static int smb135x_get_prop_batt_health(struct smb135x_chg *chip)
+{
+	union power_supply_propval ret = {0, };
+
+	if (chip->batt_hot)
+		ret.intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+	else if (chip->batt_cold)
+		ret.intval = POWER_SUPPLY_HEALTH_COLD;
+	else if (chip->batt_warm)
+		ret.intval = POWER_SUPPLY_HEALTH_WARM;
+	else if (chip->batt_cool)
+		ret.intval = POWER_SUPPLY_HEALTH_COOL;
+	else
+		ret.intval = POWER_SUPPLY_HEALTH_GOOD;
+
+	return ret.intval;
+}
+
+static int smb135x_enable_volatile_writes(struct smb135x_chg *chip)
+{
+	int rc;
+
+	rc = smb135x_masked_write(chip, CMD_I2C_REG,
+			ALLOW_VOLATILE_BIT, ALLOW_VOLATILE_BIT);
+	if (rc < 0)
+		dev_err(chip->dev,
+			"Couldn't set ALLOW_VOLATILE_BIT rc=%d\n", rc);
+
+	return rc;
+}
+
+static int usb_current_table_smb1356[] = {
+	180,
+	240,
+	270,
+	285,
+	300,
+	330,
+	360,
+	390,
+	420,
+	540,
+	570,
+	600,
+	660,
+	720,
+	840,
+	900,
+	960,
+	1080,
+	1110,
+	1128,
+	1146,
+	1170,
+	1182,
+	1200,
+	1230,
+	1260,
+	1380,
+	1440,
+	1560,
+	1620,
+	1680,
+	1800
+};
+
+static int fastchg_current_table[] = {
+	300,
+	400,
+	450,
+	475,
+	500,
+	550,
+	600,
+	650,
+	700,
+	900,
+	950,
+	1000,
+	1100,
+	1200,
+	1400,
+	2700,
+	1500,
+	1600,
+	1800,
+	1850,
+	1880,
+	1910,
+	2800,
+	1950,
+	1970,
+	2000,
+	2050,
+	2100,
+	2300,
+	2400,
+	2500,
+	3000
+};
+
+static int usb_current_table_smb1357_smb1358[] = {
+	300,
+	400,
+	450,
+	475,
+	500,
+	550,
+	600,
+	650,
+	700,
+	900,
+	950,
+	1000,
+	1100,
+	1200,
+	1400,
+	1450,
+	1500,
+	1600,
+	1800,
+	1850,
+	1880,
+	1910,
+	1930,
+	1950,
+	1970,
+	2000,
+	2050,
+	2100,
+	2300,
+	2400,
+	2500,
+	3000
+};
+
+static int usb_current_table_smb1359[] = {
+	300,
+	400,
+	450,
+	475,
+	500,
+	550,
+	600,
+	650,
+	700,
+	900,
+	950,
+	1000,
+	1100,
+	1200,
+	1400,
+	1450,
+	1500,
+	1600,
+	1800,
+	1850,
+	1880,
+	1910,
+	1930,
+	1950,
+	1970,
+	2000,
+	2050,
+	2100,
+	2300,
+	2400,
+	2500
+};
+
+static int dc_current_table_smb1356[] = {
+	180,
+	240,
+	270,
+	285,
+	300,
+	330,
+	360,
+	390,
+	420,
+	540,
+	570,
+	600,
+	660,
+	720,
+	840,
+	870,
+	900,
+	960,
+	1080,
+	1110,
+	1128,
+	1146,
+	1158,
+	1170,
+	1182,
+	1200,
+};
+
+static int dc_current_table[] = {
+	300,
+	400,
+	450,
+	475,
+	500,
+	550,
+	600,
+	650,
+	700,
+	900,
+	950,
+	1000,
+	1100,
+	1200,
+	1400,
+	1450,
+	1500,
+	1600,
+	1800,
+	1850,
+	1880,
+	1910,
+	1930,
+	1950,
+	1970,
+	2000,
+};
+
+#define CURRENT_100_MA		100
+#define CURRENT_150_MA		150
+#define CURRENT_500_MA		500
+#define CURRENT_900_MA		900
+#define SUSPEND_CURRENT_MA	2
+
+static int __smb135x_usb_suspend(struct smb135x_chg *chip, bool suspend)
+{
+	int rc;
+
+	rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+			USB_SHUTDOWN_BIT, suspend ? USB_SHUTDOWN_BIT : 0);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set cfg 11 rc = %d\n", rc);
+	return rc;
+}
+
+static int __smb135x_dc_suspend(struct smb135x_chg *chip, bool suspend)
+{
+	int rc = 0;
+
+	rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+			DC_SHUTDOWN_BIT, suspend ? DC_SHUTDOWN_BIT : 0);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set cfg 11 rc = %d\n", rc);
+	return rc;
+}
+
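+/*
+ * Suspend or unsuspend an input path (USB or DC) on behalf of a reason bit
+ * (USER, THERMAL, CURRENT). Reasons accumulate in a per-path bitmask and the
+ * hardware path is toggled only when that mask transitions between zero and
+ * non-zero.
+ */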
+static int smb135x_path_suspend(struct smb135x_chg *chip, enum path_type path,
+						int reason, bool suspend)
+{
+	int rc = 0;
+	int suspended;
+	int *path_suspended;
+	int (*func)(struct smb135x_chg *chip, bool suspend);
+
+	mutex_lock(&chip->path_suspend_lock);
+	if (path == USB) {
+		suspended = chip->usb_suspended;
+		path_suspended = &chip->usb_suspended;
+		func = __smb135x_usb_suspend;
+	} else {
+		suspended = chip->dc_suspended;
+		path_suspended = &chip->dc_suspended;
+		func = __smb135x_dc_suspend;
+	}
+
+	if (suspend == false)
+		suspended &= ~reason;
+	else
+		suspended |= reason;
+
+	if (*path_suspended && !suspended)
+		rc = func(chip, 0);
+	if (!(*path_suspended) && suspended)
+		rc = func(chip, 1);
+
+	if (rc)
+		dev_err(chip->dev, "Couldn't set/unset suspend for %s path rc = %d\n",
+					path == USB ? "usb" : "dc",
+					rc);
+	else
+		*path_suspended = suspended;
+
+	mutex_unlock(&chip->path_suspend_lock);
+	return rc;
+}
+
+static int smb135x_get_usb_chg_current(struct smb135x_chg *chip)
+{
+	if (chip->usb_suspended)
+		return SUSPEND_CURRENT_MA;
+	else
+		return chip->real_usb_psy_ma;
+}
+#define FCC_MASK			SMB135X_MASK(4, 0)
+#define CFG_1C_REG			0x1C
+static int smb135x_get_fastchg_current(struct smb135x_chg *chip)
+{
+	u8 reg;
+	int rc;
+
+	rc = smb135x_read(chip, CFG_1C_REG, &reg);
+	if (rc < 0) {
+		pr_debug("cannot read 1c rc = %d\n", rc);
+		return 0;
+	}
+	reg &= FCC_MASK;
+	if (chip->fastchg_current_arr_size == 0
+			|| reg >= chip->fastchg_current_arr_size) {
+		dev_err(chip->dev, "Current table out of range\n");
+		return -EINVAL;
+	}
+	return chip->fastchg_current_table[reg];
+}
+
+static int smb135x_set_fastchg_current(struct smb135x_chg *chip,
+							int current_ma)
+{
+	int i, rc, diff, best, best_diff;
+	u8 reg;
+
+	/*
+	 * if there is no array loaded or if the smallest current limit is
+	 * above the requested current, then do nothing
+	 */
+	if (chip->fastchg_current_arr_size == 0) {
+		dev_err(chip->dev, "no table loaded\n");
+		return -EINVAL;
+	} else if ((current_ma - chip->fastchg_current_table[0]) < 0) {
+		dev_err(chip->dev, "invalid current requested\n");
+		return -EINVAL;
+	}
+
+	/* use the closest setting under the requested current */
+	best = 0;
+	best_diff = current_ma - chip->fastchg_current_table[best];
+
+	for (i = 1; i < chip->fastchg_current_arr_size; i++) {
+		diff = current_ma - chip->fastchg_current_table[i];
+		if (diff >= 0 && diff < best_diff) {
+			best_diff = diff;
+			best = i;
+		}
+	}
+	i = best;
+
+	reg = i & FCC_MASK;
+	rc = smb135x_masked_write(chip, CFG_1C_REG, FCC_MASK, reg);
+	if (rc < 0)
+		dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+	pr_debug("fastchg current set to %dma\n",
+			chip->fastchg_current_table[i]);
+	return rc;
+}
+
+static int smb135x_set_high_usb_chg_current(struct smb135x_chg *chip,
+							int current_ma)
+{
+	int i, rc;
+	u8 usb_cur_val;
+
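+	/* pick the largest table entry that does not exceed the request */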
+	for (i = chip->usb_current_arr_size - 1; i >= 0; i--) {
+		if (current_ma >= chip->usb_current_table[i])
+			break;
+	}
+	if (i < 0) {
+		dev_err(chip->dev,
+			"Cannot find %dma current_table using %d\n",
+			current_ma, CURRENT_150_MA);
+		rc = smb135x_masked_write(chip, CFG_5_REG,
+						USB_2_3_BIT, USB_2_3_BIT);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_100_VAL);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+					CURRENT_150_MA, rc);
+		else
+			chip->real_usb_psy_ma = CURRENT_150_MA;
+		return rc;
+	}
+
+	usb_cur_val = i & USBIN_INPUT_MASK;
+	rc = smb135x_masked_write(chip, CFG_C_REG,
+				USBIN_INPUT_MASK, usb_cur_val);
+	if (rc < 0) {
+		dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+					USB_100_500_AC_MASK, USB_AC_VAL);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc);
+	else
+		chip->real_usb_psy_ma = chip->usb_current_table[i];
+	return rc;
+}
+
+#define MAX_VERSION			0xF
+#define USB_100_PROBLEM_VERSION		0x2
+/* if APSD results are used
+ *	if SDP is detected it will look at 500mA setting
+ *		if set it will draw 500mA
+ *		if unset it will draw 100mA
+ *	if CDP/DCP it will look at 0x0C setting
+ *	i.e. values in 0x41[1, 0] do not matter
+ */
+static int smb135x_set_usb_chg_current(struct smb135x_chg *chip,
+							int current_ma)
+{
+	int rc;
+
+	pr_debug("USB current_ma = %d\n", current_ma);
+
+	if (chip->workaround_flags & WRKARND_USB100_BIT) {
+		pr_info("USB requested = %dmA using %dmA\n", current_ma,
+						CURRENT_500_MA);
+		current_ma = CURRENT_500_MA;
+	}
+
+	if (current_ma == 0)
+		/* choose the lowest available value, 100 mA */
+		current_ma = CURRENT_100_MA;
+
+	if (current_ma == SUSPEND_CURRENT_MA) {
+		/* force suspend bit */
+		rc = smb135x_path_suspend(chip, USB, CURRENT, true);
+		chip->real_usb_psy_ma = SUSPEND_CURRENT_MA;
+		goto out;
+	}
+	if (current_ma < CURRENT_150_MA) {
+		/* force 100mA */
+		rc = smb135x_masked_write(chip, CFG_5_REG, USB_2_3_BIT, 0);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_100_VAL);
+		rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+		chip->real_usb_psy_ma = CURRENT_100_MA;
+		goto out;
+	}
+	/* specific current values */
+	if (current_ma == CURRENT_150_MA) {
+		rc = smb135x_masked_write(chip, CFG_5_REG,
+						USB_2_3_BIT, USB_2_3_BIT);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_100_VAL);
+		rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+		chip->real_usb_psy_ma = CURRENT_150_MA;
+		goto out;
+	}
+	if (current_ma == CURRENT_500_MA) {
+		rc = smb135x_masked_write(chip, CFG_5_REG, USB_2_3_BIT, 0);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_500_VAL);
+		rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+		chip->real_usb_psy_ma = CURRENT_500_MA;
+		goto out;
+	}
+	if (current_ma == CURRENT_900_MA) {
+		rc = smb135x_masked_write(chip, CFG_5_REG,
+						USB_2_3_BIT, USB_2_3_BIT);
+		rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+				USB_100_500_AC_MASK, USB_500_VAL);
+		rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+		chip->real_usb_psy_ma = CURRENT_900_MA;
+		goto out;
+	}
+
+	rc = smb135x_set_high_usb_chg_current(chip, current_ma);
+	rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+out:
+	if (rc < 0)
+		dev_err(chip->dev,
+			"Couldn't set %dmA rc = %d\n", current_ma, rc);
+	return rc;
+}
+
+static int smb135x_set_dc_chg_current(struct smb135x_chg *chip,
+							int current_ma)
+{
+	int i, rc;
+	u8 dc_cur_val;
+
+	for (i = chip->dc_current_arr_size - 1; i >= 0; i--) {
+		if (chip->dc_psy_ma >= chip->dc_current_table[i])
+			break;
+	}
+	dc_cur_val = i & DCIN_INPUT_MASK;
+	rc = smb135x_masked_write(chip, CFG_A_REG,
+				DCIN_INPUT_MASK, dc_cur_val);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set dc charge current rc = %d\n",
+				rc);
+		return rc;
+	}
+	return 0;
+}
+
+static int smb135x_set_appropriate_current(struct smb135x_chg *chip,
+						enum path_type path)
+{
+	int therm_ma, current_ma;
+	int path_current = (path == USB) ? chip->usb_psy_ma : chip->dc_psy_ma;
+	int (*func)(struct smb135x_chg *chip, int current_ma);
+	int rc = 0;
+
+	if (!chip->usb_psy && path == USB)
+		return 0;
+
+	/*
+	 * If the battery is absent, do not modify the current at all: the
+	 * existing values were set by the bootloader or the default
+	 * configuration, and since the input is the only source of power we
+	 * should not change them.
+	 */
+	if (!chip->batt_present) {
+		pr_debug("ignoring current request since battery is absent\n");
+		return 0;
+	}
+
+	if (path == USB) {
+		path_current = chip->usb_psy_ma;
+		func = smb135x_set_usb_chg_current;
+	} else {
+		path_current = chip->dc_psy_ma;
+		func = smb135x_set_dc_chg_current;
+		if (chip->dc_psy_type == -EINVAL)
+			func = NULL;
+	}
+
+	if (chip->therm_lvl_sel > 0
+			&& chip->therm_lvl_sel < (chip->thermal_levels - 1))
+		/*
+		 * consider thermal limit only when it is active and not at
+		 * the highest level
+		 */
+		therm_ma = chip->thermal_mitigation[chip->therm_lvl_sel];
+	else
+		therm_ma = path_current;
+
+	current_ma = min(therm_ma, path_current);
+	if (func != NULL)
+		rc = func(chip, current_ma);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set %s current to min(%d, %d)rc = %d\n",
+				path == USB ? "usb" : "dc",
+				therm_ma, path_current,
+				rc);
+	return rc;
+}
+
+static int smb135x_charging_enable(struct smb135x_chg *chip, int enable)
+{
+	int rc;
+
+	rc = smb135x_masked_write(chip, CMD_CHG_REG,
+				CMD_CHG_EN, enable ? CMD_CHG_EN : 0);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't set CHG_ENABLE_BIT enable = %d rc = %d\n",
+			enable, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int __smb135x_charging(struct smb135x_chg *chip, int enable)
+{
+	int rc = 0;
+
+	pr_debug("charging enable = %d\n", enable);
+
+	if (chip->chg_disabled_permanently) {
+		pr_debug("charging is disabled permanetly\n");
+		return -EINVAL;
+	}
+
+	rc = smb135x_charging_enable(chip, enable);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't %s charging  rc = %d\n",
+			enable ? "enable" : "disable", rc);
+		return rc;
+	}
+	chip->chg_enabled = enable;
+
+	/* set the suspended status */
+	rc = smb135x_path_suspend(chip, DC, USER, !enable);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't set dc suspend to %d rc = %d\n",
+			enable, rc);
+		return rc;
+	}
+	rc = smb135x_path_suspend(chip, USB, USER, !enable);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't set usb suspend to %d rc = %d\n",
+			enable, rc);
+		return rc;
+	}
+
+	pr_debug("charging %s\n",
+			enable ?  "enabled" : "disabled running from batt");
+	return rc;
+}
+
+static int smb135x_charging(struct smb135x_chg *chip, int enable)
+{
+	int rc = 0;
+
+	pr_debug("charging enable = %d\n", enable);
+
+	__smb135x_charging(chip, enable);
+
+	if (chip->usb_psy) {
+		pr_debug("usb psy changed\n");
+		power_supply_changed(chip->usb_psy);
+	}
+	if (chip->dc_psy_type != -EINVAL) {
+		pr_debug("dc psy changed\n");
+		power_supply_changed(chip->dc_psy);
+	}
+	pr_debug("charging %s\n",
+			enable ?  "enabled" : "disabled running from batt");
+	return rc;
+}
+
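+/*
+ * Apply a thermal mitigation level: the highest level suspends both the USB
+ * and DC paths, while lower levels only cap the input current using the
+ * thermal_mitigation[] table.
+ */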
+static int smb135x_system_temp_level_set(struct smb135x_chg *chip,
+								int lvl_sel)
+{
+	int rc = 0;
+	int prev_therm_lvl;
+
+	if (!chip->thermal_mitigation) {
+		pr_err("Thermal mitigation not supported\n");
+		return -EINVAL;
+	}
+
+	if (lvl_sel < 0) {
+		pr_err("Unsupported level selected %d\n", lvl_sel);
+		return -EINVAL;
+	}
+
+	if (lvl_sel >= chip->thermal_levels) {
+		pr_err("Unsupported level selected %d forcing %d\n", lvl_sel,
+				chip->thermal_levels - 1);
+		lvl_sel = chip->thermal_levels - 1;
+	}
+
+	if (lvl_sel == chip->therm_lvl_sel)
+		return 0;
+
+	mutex_lock(&chip->current_change_lock);
+	prev_therm_lvl = chip->therm_lvl_sel;
+	chip->therm_lvl_sel = lvl_sel;
+	if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) {
+		/*
+		 * Disable charging if highest value selected by
+		 * setting the DC and USB path in suspend
+		 */
+		rc = smb135x_path_suspend(chip, DC, THERMAL, true);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set dc suspend rc %d\n", rc);
+			goto out;
+		}
+		rc = smb135x_path_suspend(chip, USB, THERMAL, true);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set usb suspend rc %d\n", rc);
+			goto out;
+		}
+		goto out;
+	}
+
+	smb135x_set_appropriate_current(chip, USB);
+	smb135x_set_appropriate_current(chip, DC);
+
+	if (prev_therm_lvl == chip->thermal_levels - 1) {
+		/*
+		 * If the highest value was previously selected, charging must
+		 * have been disabled. Enable charging by taking the DC and USB
+		 * paths out of suspend.
+		 */
+		rc = smb135x_path_suspend(chip, DC, THERMAL, false);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set dc suspend rc %d\n", rc);
+			goto out;
+		}
+		rc = smb135x_path_suspend(chip, USB, THERMAL, false);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set usb suspend rc %d\n", rc);
+			goto out;
+		}
+	}
+out:
+	mutex_unlock(&chip->current_change_lock);
+	return rc;
+}
+
+static int smb135x_battery_set_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       const union power_supply_propval *val)
+{
+	int rc = 0, update_psy = 0;
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		if (!chip->bms_controlled_charging) {
+			rc = -EINVAL;
+			break;
+		}
+		switch (val->intval) {
+		case POWER_SUPPLY_STATUS_FULL:
+			rc = smb135x_charging_enable(chip, false);
+			if (rc < 0) {
+				dev_err(chip->dev, "Couldn't disable charging rc = %d\n",
+						rc);
+			} else {
+				chip->chg_done_batt_full = true;
+				update_psy = 1;
+				dev_dbg(chip->dev, "status = FULL chg_done_batt_full = %d",
+						chip->chg_done_batt_full);
+			}
+			break;
+		case POWER_SUPPLY_STATUS_DISCHARGING:
+			chip->chg_done_batt_full = false;
+			update_psy = 1;
+			dev_dbg(chip->dev, "status = DISCHARGING chg_done_batt_full = %d",
+					chip->chg_done_batt_full);
+			break;
+		case POWER_SUPPLY_STATUS_CHARGING:
+			rc = smb135x_charging_enable(chip, true);
+			if (rc < 0) {
+				dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+						rc);
+			} else {
+				chip->chg_done_batt_full = false;
+				dev_dbg(chip->dev, "status = CHARGING chg_done_batt_full = %d",
+						chip->chg_done_batt_full);
+			}
+			break;
+		default:
+			update_psy = 0;
+			rc = -EINVAL;
+		}
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		smb135x_charging(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		chip->fake_battery_soc = val->intval;
+		update_psy = 1;
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		smb135x_system_temp_level_set(chip, val->intval);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	if (!rc && update_psy)
+		power_supply_changed(chip->batt_psy);
+	return rc;
+}
+
+static int smb135x_battery_is_writeable(struct power_supply *psy,
+				       enum power_supply_property prop)
+{
+	int rc;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+	case POWER_SUPPLY_PROP_CAPACITY:
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static int smb135x_battery_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		val->intval = smb135x_get_prop_batt_status(chip);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = smb135x_get_prop_batt_present(chip);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		val->intval = chip->chg_enabled;
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		val->intval = smb135x_get_prop_charge_type(chip);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = smb135x_get_prop_batt_capacity(chip);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = smb135x_get_prop_batt_health(chip);
+		break;
+	case POWER_SUPPLY_PROP_TECHNOLOGY:
+		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+		break;
+	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+		val->intval = chip->therm_lvl_sel;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static enum power_supply_property smb135x_dc_properties[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_HEALTH,
+};
+
+static int smb135x_dc_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = chip->dc_present;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = chip->chg_enabled ? chip->dc_present : 0;
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		val->intval = chip->dc_present;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define MIN_FLOAT_MV	3600
+#define MAX_FLOAT_MV	4500
+
+#define MID_RANGE_FLOAT_MV_MIN		3600
+#define MID_RANGE_FLOAT_MIN_VAL		0x05
+#define MID_RANGE_FLOAT_STEP_MV		20
+
+#define HIGH_RANGE_FLOAT_MIN_MV		4340
+#define HIGH_RANGE_FLOAT_MIN_VAL	0x2A
+#define HIGH_RANGE_FLOAT_STEP_MV	10
+
+#define VHIGH_RANGE_FLOAT_MIN_MV	4400
+#define VHIGH_RANGE_FLOAT_MIN_VAL	0x2E
+#define VHIGH_RANGE_FLOAT_STEP_MV	20
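+/*
+ * Convert the requested float voltage in mV to a VFLOAT register code using
+ * the three linear ranges (mid, high, very high) defined above.
+ */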
+static int smb135x_float_voltage_set(struct smb135x_chg *chip, int vfloat_mv)
+{
+	u8 temp;
+
+	if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+		dev_err(chip->dev, "bad float voltage mv =%d asked to set\n",
+					vfloat_mv);
+		return -EINVAL;
+	}
+
+	if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) {
+		/* mid range */
+		temp = MID_RANGE_FLOAT_MIN_VAL
+			+ (vfloat_mv - MID_RANGE_FLOAT_MV_MIN)
+				/ MID_RANGE_FLOAT_STEP_MV;
+	} else if (vfloat_mv < VHIGH_RANGE_FLOAT_MIN_MV) {
+		/* high range */
+		temp = HIGH_RANGE_FLOAT_MIN_VAL
+			+ (vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV)
+				/ HIGH_RANGE_FLOAT_STEP_MV;
+	} else {
+		/* very high range */
+		temp = VHIGH_RANGE_FLOAT_MIN_VAL
+			+ (vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV)
+				/ VHIGH_RANGE_FLOAT_STEP_MV;
+	}
+
+	return smb135x_write(chip, VFLOAT_REG, temp);
+}
+
+static int smb135x_set_resume_threshold(struct smb135x_chg *chip,
+		int resume_delta_mv)
+{
+	int rc;
+	u8 reg;
+
+	if (!chip->inhibit_disabled) {
+		if (resume_delta_mv < 100)
+			reg = CHG_INHIBIT_50MV_VAL;
+		else if (resume_delta_mv < 200)
+			reg = CHG_INHIBIT_100MV_VAL;
+		else if (resume_delta_mv < 300)
+			reg = CHG_INHIBIT_200MV_VAL;
+		else
+			reg = CHG_INHIBIT_300MV_VAL;
+
+		rc = smb135x_masked_write(chip, CFG_4_REG, CHG_INHIBIT_MASK,
+						reg);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n",
+						rc);
+			return rc;
+		}
+	}
+
+	if (resume_delta_mv < 200)
+		reg = 0;
+	else
+		reg = RECHARGE_200MV_BIT;
+
+	rc = smb135x_masked_write(chip, CFG_5_REG, RECHARGE_200MV_BIT, reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set recharge  rc = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+static enum power_supply_property smb135x_parallel_properties[] = {
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+};
+
+static bool smb135x_is_input_current_limited(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+
+	rc = smb135x_read(chip, STATUS_2_REG, &reg);
+	if (rc) {
+		pr_debug("Couldn't read _REG for ICL status rc = %d\n", rc);
+		return false;
+	}
+
+	return !!(reg & HARD_LIMIT_STS_BIT);
+}
+
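+/*
+ * Called when the parallel charger becomes present or absent. On insertion
+ * the chip is reconfigured (volatile writes, float voltage, recharge
+ * threshold, pin-controlled charging) and the USB path is left suspended
+ * until CURRENT_MAX is set; on removal the USB path is also forced into
+ * suspend, where an i2c failure is tolerated since the chip may already be
+ * shut down.
+ */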
+static int smb135x_parallel_set_chg_present(struct smb135x_chg *chip,
+						int present)
+{
+	u8 val;
+	int rc;
+
+	if (present == chip->parallel_charger_present) {
+		pr_debug("present %d -> %d, skipping\n",
+				chip->parallel_charger_present, present);
+		return 0;
+	}
+
+	if (present) {
+		/* Check if SMB135x is present */
+		rc = smb135x_read(chip, VERSION1_REG, &val);
+		if (rc) {
+			pr_debug("Failed to detect smb135x-parallel charger may be absent\n");
+			return -ENODEV;
+		}
+
+		rc = smb135x_enable_volatile_writes(chip);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't configure for volatile rc = %d\n",
+				rc);
+			return rc;
+		}
+
+		/* set the float voltage */
+		if (chip->vfloat_mv != -EINVAL) {
+			rc = smb135x_float_voltage_set(chip, chip->vfloat_mv);
+			if (rc < 0) {
+				dev_err(chip->dev,
+					"Couldn't set float voltage rc = %d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		/* resume threshold */
+		if (chip->resume_delta_mv != -EINVAL) {
+			smb135x_set_resume_threshold(chip,
+					chip->resume_delta_mv);
+		}
+
+		rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+					USE_REGISTER_FOR_CURRENT,
+					USE_REGISTER_FOR_CURRENT);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set input limit cmd rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* set chg en by pin active low and enable auto recharge */
+		rc = smb135x_masked_write(chip, CFG_14_REG,
+				CHG_EN_BY_PIN_BIT | CHG_EN_ACTIVE_LOW_BIT
+				| DISABLE_AUTO_RECHARGE_BIT,
+				CHG_EN_BY_PIN_BIT |
+				chip->parallel_pin_polarity_setting);
+
+		/* set bit 0 = 100mA bit 1 = 500mA and set register control */
+		rc = smb135x_masked_write(chip, CFG_E_REG,
+				POLARITY_100_500_BIT | USB_CTRL_BY_PIN_BIT,
+				POLARITY_100_500_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set usbin cfg rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* control USB suspend via command bits */
+		rc = smb135x_masked_write(chip, USBIN_DCIN_CFG_REG,
+			USBIN_SUSPEND_VIA_COMMAND_BIT,
+			USBIN_SUSPEND_VIA_COMMAND_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set cfg rc=%d\n", rc);
+			return rc;
+		}
+
+		/* set the fastchg_current to the lowest setting */
+		if (chip->fastchg_current_arr_size > 0)
+			rc = smb135x_set_fastchg_current(chip,
+					chip->fastchg_current_table[0]);
+
+		/*
+		 * enforce chip->chg_enabled since this could be the first
+		 * time we have i2c access to the charger after
+		 * chip->chg_enabled has been modified
+		 */
+		smb135x_charging(chip, chip->chg_enabled);
+	}
+
+	chip->parallel_charger_present = present;
+	/*
+	 * When present is being set, force USB suspend and start charging
+	 * only once CURRENT_MAX is set.
+	 *
+	 * Usually the chip will be shut down (no i2c access to the chip)
+	 * when USB is removed; however, there could be situations when
+	 * it is not. To cover USB reinsertions in such situations,
+	 * force USB suspend when present is being unset as well.
+	 * It is likely that i2c access could fail here - do not return an
+	 * error. (It is not possible to detect whether the chip is in the
+	 * shutdown state or not except via the i2c error.)
+	 */
+	chip->usb_psy_ma = SUSPEND_CURRENT_MA;
+	rc = smb135x_path_suspend(chip, USB, CURRENT, true);
+
+	if (present) {
+		if (rc) {
+			dev_err(chip->dev,
+				"Couldn't set usb suspend to true rc = %d\n",
+				rc);
+			return rc;
+		}
+		/* Check if the USB is configured for suspend. If not, do it */
+		mutex_lock(&chip->path_suspend_lock);
+		rc = smb135x_read(chip, CMD_INPUT_LIMIT, &val);
+		if (rc) {
+			dev_err(chip->dev,
+				"Couldn't read 0x%02x rc:%d\n", CMD_INPUT_LIMIT,
+				rc);
+			mutex_unlock(&chip->path_suspend_lock);
+			return rc;
+		} else if (!(val & BIT(6))) {
+			rc = __smb135x_usb_suspend(chip, 1);
+		}
+		mutex_unlock(&chip->path_suspend_lock);
+		if (rc) {
+			dev_err(chip->dev,
+				"Couldn't set usb to suspend rc:%d\n", rc);
+			return rc;
+		}
+	} else {
+		chip->real_usb_psy_ma = SUSPEND_CURRENT_MA;
+	}
+	return 0;
+}
+
+static int smb135x_parallel_set_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       const union power_supply_propval *val)
+{
+	int rc = 0;
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		if (chip->parallel_charger_present)
+			smb135x_charging(chip, val->intval);
+		else
+			chip->chg_enabled = val->intval;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smb135x_parallel_set_chg_present(chip, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		if (chip->parallel_charger_present) {
+			rc = smb135x_set_fastchg_current(chip,
+						val->intval / 1000);
+		}
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if (chip->parallel_charger_present) {
+			chip->usb_psy_ma = val->intval / 1000;
+			rc = smb135x_set_usb_chg_current(chip,
+							chip->usb_psy_ma);
+		}
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		if (chip->parallel_charger_present &&
+			(chip->vfloat_mv != val->intval)) {
+			rc = smb135x_float_voltage_set(chip, val->intval);
+			if (!rc)
+				chip->vfloat_mv = val->intval;
+		} else {
+			chip->vfloat_mv = val->intval;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return rc;
+}
+
+static int smb135x_parallel_is_writeable(struct power_supply *psy,
+				       enum power_supply_property prop)
+{
+	int rc;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		rc = 1;
+		break;
+	default:
+		rc = 0;
+		break;
+	}
+	return rc;
+}
+
+static int smb135x_parallel_get_property(struct power_supply *psy,
+				       enum power_supply_property prop,
+				       union power_supply_propval *val)
+{
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		val->intval = chip->chg_enabled;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if (chip->parallel_charger_present)
+			val->intval = smb135x_get_usb_chg_current(chip) * 1000;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = chip->vfloat_mv;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = chip->parallel_charger_present;
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		if (chip->parallel_charger_present)
+			val->intval = smb135x_get_fastchg_current(chip) * 1000;
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_STATUS:
+		if (chip->parallel_charger_present)
+			val->intval = smb135x_get_prop_batt_status(chip);
+		else
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		if (chip->parallel_charger_present)
+			val->intval = smb135x_is_input_current_limited(chip);
+		else
+			val->intval = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
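+/*
+ * Called when an external supply changes: mirror the USB supply's
+ * CURRENT_MAX into our input current limit and keep its ONLINE property
+ * consistent with USB presence and the charging state.
+ */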
+static void smb135x_external_power_changed(struct power_supply *psy)
+{
+	struct smb135x_chg *chip = power_supply_get_drvdata(psy);
+	union power_supply_propval prop = {0,};
+	int rc, current_limit = 0;
+
+	if (!chip->usb_psy)
+		return;
+
+	if (chip->bms_psy_name)
+		chip->bms_psy =
+			power_supply_get_by_name((char *)chip->bms_psy_name);
+
+	rc = power_supply_get_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+	if (rc < 0)
+		dev_err(chip->dev,
+			"could not read USB current_max property, rc=%d\n", rc);
+	else
+		current_limit = prop.intval / 1000;
+
+	pr_debug("current_limit = %d\n", current_limit);
+
+	if (chip->usb_psy_ma != current_limit) {
+		mutex_lock(&chip->current_change_lock);
+		chip->usb_psy_ma = current_limit;
+		rc = smb135x_set_appropriate_current(chip, USB);
+		mutex_unlock(&chip->current_change_lock);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't set usb current rc = %d\n",
+					rc);
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_ONLINE, &prop);
+	if (rc < 0)
+		dev_err(chip->dev,
+			"could not read USB ONLINE property, rc=%d\n", rc);
+
+	/* update online property */
+	rc = 0;
+	if (chip->usb_present && chip->chg_enabled && chip->usb_psy_ma != 0) {
+		if (prop.intval == 0) {
+			prop.intval = 1;
+			rc = power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_ONLINE, &prop);
+		}
+	} else {
+		if (prop.intval == 1) {
+			prop.intval = 0;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_ONLINE, &prop);
+		}
+	}
+	if (rc < 0)
+		dev_err(chip->dev, "could not set usb online, rc=%d\n", rc);
+}
+
+static bool elapsed_msec_greater(struct timeval *start_time,
+				struct timeval *end_time, int ms)
+{
+	int msec_elapsed;
+
+	msec_elapsed = (end_time->tv_sec - start_time->tv_sec) * 1000 +
+		DIV_ROUND_UP(end_time->tv_usec - start_time->tv_usec, 1000);
+
+	return (msec_elapsed > ms);
+}
+
+#define MAX_STEP_MS		10
+static int smb135x_chg_otg_enable(struct smb135x_chg *chip)
+{
+	int rc = 0;
+	int restart_count = 0;
+	struct timeval time_a, time_b, time_c, time_d;
+	u8 reg;
+
+	if (chip->revision == REV_2) {
+		/*
+		 * Workaround for a hardware bug where the OTG needs to be
+		 * enabled, disabled, and enabled again for it to actually turn
+		 * on. The time between each step should be at most MAX_STEP_MS.
+		 *
+		 * Note that if the enable-disable pair executes within the
+		 * timeframe but the final enable takes more than MAX_STEP_MS,
+		 * we treat it as the first enable and try disabling again. We
+		 * don't want to issue enables back to back.
+		 *
+		 * Notice the instances when time is captured and the
+		 * successive steps:
+		 * timeA-enable-timeC-disable-timeB-enable-timeD.
+		 * When
+		 * (timeB - timeA) < MAX_STEP_MS AND
+		 *			(timeD - timeC) < MAX_STEP_MS
+		 * it is guaranteed that the successive steps
+		 * must have executed within MAX_STEP_MS.
+		 */
+		do_gettimeofday(&time_a);
+restart_from_enable:
+		/* first step - enable otg */
+		rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+					rc);
+			return rc;
+		}
+
+restart_from_disable:
+		/* second step - disable otg */
+		do_gettimeofday(&time_c);
+		rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+					rc);
+			return rc;
+		}
+		do_gettimeofday(&time_b);
+
+		if (elapsed_msec_greater(&time_a, &time_b, MAX_STEP_MS)) {
+			restart_count++;
+			if (restart_count > 10) {
+				dev_err(chip->dev,
+						"Couldn't enable OTG restart_count=%d\n",
+						restart_count);
+				return -EAGAIN;
+			}
+			time_a = time_b;
+			pr_debug("restarting from first enable\n");
+			goto restart_from_enable;
+		}
+
+		/* third step (first step in case of a failure) - enable otg */
+		time_a = time_b;
+		rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+					rc);
+			return rc;
+		}
+		do_gettimeofday(&time_d);
+
+		if (elapsed_msec_greater(&time_c, &time_d, MAX_STEP_MS)) {
+			restart_count++;
+			if (restart_count > 10) {
+				dev_err(chip->dev,
+						"Couldn't enable OTG restart_count=%d\n",
+						restart_count);
+				return -EAGAIN;
+			}
+			pr_debug("restarting from disable\n");
+			goto restart_from_disable;
+		}
+	} else {
+		rc = smb135x_read(chip, CMD_CHG_REG, &reg);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't read cmd reg rc=%d\n",
+					rc);
+			return rc;
+		}
+		if (reg & OTG_EN) {
+			/* if it is set, disable it before re-enabling it */
+			rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+			if (rc < 0) {
+				dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n",
+						rc);
+				return rc;
+			}
+		}
+		rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int smb135x_chg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+	chip->otg_oc_count = 0;
+	rc = smb135x_chg_otg_enable(chip);
+	if (rc)
+		dev_err(chip->dev, "Couldn't enable otg regulator rc=%d\n", rc);
+
+	return rc;
+}
+
+static int smb135x_chg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+	mutex_lock(&chip->otg_oc_count_lock);
+	cancel_delayed_work_sync(&chip->reset_otg_oc_count_work);
+	mutex_unlock(&chip->otg_oc_count_lock);
+	rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n", rc);
+	return rc;
+}
+
+static int smb135x_chg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+	int rc = 0;
+	u8 reg = 0;
+	struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+	rc = smb135x_read(chip, CMD_CHG_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev,
+				"Couldn't read OTG enable bit rc=%d\n", rc);
+		return rc;
+	}
+
+	return (reg & OTG_EN) ? 1 : 0;
+}
+
+struct regulator_ops smb135x_chg_otg_reg_ops = {
+	.enable		= smb135x_chg_otg_regulator_enable,
+	.disable	= smb135x_chg_otg_regulator_disable,
+	.is_enabled	= smb135x_chg_otg_regulator_is_enable,
+};
+
+static int smb135x_set_current_tables(struct smb135x_chg *chip)
+{
+	switch (chip->version) {
+	case V_SMB1356:
+		chip->usb_current_table = usb_current_table_smb1356;
+		chip->usb_current_arr_size
+			= ARRAY_SIZE(usb_current_table_smb1356);
+		chip->dc_current_table = dc_current_table_smb1356;
+		chip->dc_current_arr_size
+			= ARRAY_SIZE(dc_current_table_smb1356);
+		chip->fastchg_current_table = NULL;
+		chip->fastchg_current_arr_size = 0;
+		break;
+	case V_SMB1357:
+		chip->usb_current_table = usb_current_table_smb1357_smb1358;
+		chip->usb_current_arr_size
+			= ARRAY_SIZE(usb_current_table_smb1357_smb1358);
+		chip->dc_current_table = dc_current_table;
+		chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+		chip->fastchg_current_table = fastchg_current_table;
+		chip->fastchg_current_arr_size
+			= ARRAY_SIZE(fastchg_current_table);
+		break;
+	case V_SMB1358:
+		chip->usb_current_table = usb_current_table_smb1357_smb1358;
+		chip->usb_current_arr_size
+			= ARRAY_SIZE(usb_current_table_smb1357_smb1358);
+		chip->dc_current_table = dc_current_table;
+		chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+		chip->fastchg_current_table = fastchg_current_table;
+		chip->fastchg_current_arr_size
+			= ARRAY_SIZE(fastchg_current_table);
+		break;
+	case V_SMB1359:
+		chip->usb_current_table = usb_current_table_smb1359;
+		chip->usb_current_arr_size
+			= ARRAY_SIZE(usb_current_table_smb1359);
+		chip->dc_current_table = dc_current_table;
+		chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+		chip->fastchg_current_table = NULL;
+		chip->fastchg_current_arr_size = 0;
+		break;
+	}
+	return 0;
+}
+
+#define SMB1356_VERSION3_BIT	BIT(7)
+#define SMB1357_VERSION1_VAL	0x01
+#define SMB1358_VERSION1_VAL	0x02
+#define SMB1359_VERSION1_VAL	0x00
+#define SMB1357_VERSION2_VAL	0x01
+#define SMB1358_VERSION2_VAL	0x02
+#define SMB1359_VERSION2_VAL	0x00
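+/*
+ * Identify the exact part: SMB1356 is flagged by a bit in the VERSION3
+ * register, while SMB1357/1358/1359 are distinguished via VERSION1 on
+ * revisions <= 1.1 and via VERSION2 on later revisions.
+ */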
+static int smb135x_chip_version_and_revision(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 version1, version2, version3;
+
+	/* read the revision */
+	rc = read_revision(chip, &chip->revision);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read revision rc = %d\n", rc);
+		return rc;
+	}
+
+	if (chip->revision >= REV_MAX || revision_str[chip->revision] == NULL) {
+		dev_err(chip->dev, "Bad revision found = %d\n", chip->revision);
+		return -EINVAL;
+	}
+
+	/* check if it is smb1356 */
+	rc = read_version3(chip, &version3);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read version3 rc = %d\n", rc);
+		return rc;
+	}
+
+	if (version3 & SMB1356_VERSION3_BIT) {
+		chip->version = V_SMB1356;
+		goto wrkarnd_and_input_current_values;
+	}
+
+	/* check if it is smb1357, smb1358 or smb1359 based on revision */
+	if (chip->revision <= REV_1_1) {
+		rc = read_version1(chip, &version1);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't read version 1 rc = %d\n", rc);
+			return rc;
+		}
+		switch (version1) {
+		case SMB1357_VERSION1_VAL:
+			chip->version = V_SMB1357;
+			break;
+		case SMB1358_VERSION1_VAL:
+			chip->version = V_SMB1358;
+			break;
+		case SMB1359_VERSION1_VAL:
+			chip->version = V_SMB1359;
+			break;
+		default:
+			dev_err(chip->dev,
+				"Unknown version 1 = 0x%02x rc = %d\n",
+				version1, rc);
+			return rc;
+		}
+	} else {
+		rc = read_version2(chip, &version2);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't read version 2 rc = %d\n", rc);
+			return rc;
+		}
+		switch (version2) {
+		case SMB1357_VERSION2_VAL:
+			chip->version = V_SMB1357;
+			break;
+		case SMB1358_VERSION2_VAL:
+			chip->version = V_SMB1358;
+			break;
+		case SMB1359_VERSION2_VAL:
+			chip->version = V_SMB1359;
+			break;
+		default:
+			dev_err(chip->dev,
+					"Unknown version 2 = 0x%02x rc = %d\n",
+					version2, rc);
+			return rc;
+		}
+	}
+
+wrkarnd_and_input_current_values:
+	if (is_usb100_broken(chip))
+		chip->workaround_flags |= WRKARND_USB100_BIT;
+	/*
+	 * Rev v1.0 and v1.1 of SMB135x fail charger type detection
+	 * (apsd) due to interference on the D+/- lines by the USB phy.
+	 * Set the workaround flag to disable charger type reporting
+	 * for this revision.
+	 */
+	if (chip->revision <= REV_1_1)
+		chip->workaround_flags |= WRKARND_APSD_FAIL;
+
+	pr_debug("workaround_flags = %x\n", chip->workaround_flags);
+
+	return smb135x_set_current_tables(chip);
+}
+
+static int smb135x_regulator_init(struct smb135x_chg *chip)
+{
+	int rc = 0;
+	struct regulator_config cfg = {};
+
+	chip->otg_vreg.rdesc.owner = THIS_MODULE;
+	chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+	chip->otg_vreg.rdesc.ops = &smb135x_chg_otg_reg_ops;
+	chip->otg_vreg.rdesc.name = chip->dev->of_node->name;
+	chip->otg_vreg.rdesc.of_match = chip->dev->of_node->name;
+	cfg.dev = chip->dev;
+	cfg.driver_data = chip;
+
+	chip->otg_vreg.rdev = regulator_register(&chip->otg_vreg.rdesc, &cfg);
+	if (IS_ERR(chip->otg_vreg.rdev)) {
+		rc = PTR_ERR(chip->otg_vreg.rdev);
+		chip->otg_vreg.rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			dev_err(chip->dev,
+				"OTG reg failed, rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+static void smb135x_regulator_deinit(struct smb135x_chg *chip)
+{
+	if (chip->otg_vreg.rdev)
+		regulator_unregister(chip->otg_vreg.rdev);
+}
+
+static void wireless_insertion_work(struct work_struct *work)
+{
+	struct smb135x_chg *chip =
+		container_of(work, struct smb135x_chg,
+				wireless_insertion_work.work);
+
+	/* unsuspend dc */
+	smb135x_path_suspend(chip, DC, CURRENT, false);
+}
+
+static int hot_hard_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_hot = !!rt_stat;
+	return 0;
+}
+static int cold_hard_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_cold = !!rt_stat;
+	return 0;
+}
+static int hot_soft_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_warm = !!rt_stat;
+	return 0;
+}
+static int cold_soft_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_cool = !!rt_stat;
+	return 0;
+}
+static int battery_missing_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	chip->batt_present = !rt_stat;
+	return 0;
+}
+static int vbat_low_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_warn("vbat low\n");
+	return 0;
+}
+static int chg_hot_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_warn("chg hot\n");
+	return 0;
+}
+static int chg_term_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+	/*
+	 * This handler gets called even when the charger based termination
+	 * is disabled (due to change in RT status). However, in a bms
+	 * controlled design the battery status should not be updated.
+	 */
+	if (!chip->iterm_disabled)
+		chip->chg_done_batt_full = !!rt_stat;
+	return 0;
+}
+
+static int taper_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	return 0;
+}
+
+static int fast_chg_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+	if (rt_stat & IRQ_C_FASTCHG_BIT)
+		chip->chg_done_batt_full = false;
+
+	return 0;
+}
+
+static int recharge_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	int rc;
+
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+	if (chip->bms_controlled_charging) {
+		rc = smb135x_charging_enable(chip, true);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+					rc);
+	}
+
+	return 0;
+}
+
+static int safety_timeout_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_warn("safety timeout rt_stat = 0x%02x\n", rt_stat);
+	return 0;
+}
+
+/**
+ * power_ok_handler() - called when the switcher turns on or turns off
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating switcher turning on or off
+ */
+static int power_ok_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	return 0;
+}
+
+static int rid_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	bool usb_slave_present;
+	union power_supply_propval pval = {0, };
+
+	usb_slave_present = is_usb_slave_present(chip);
+
+	if (chip->usb_slave_present ^ usb_slave_present) {
+		chip->usb_slave_present = usb_slave_present;
+		if (chip->usb_psy) {
+			pr_debug("setting usb psy usb_otg = %d\n",
+					chip->usb_slave_present);
+			pval.intval = chip->usb_slave_present;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_USB_OTG, &pval);
+		}
+	}
+	return 0;
+}
+
+#define RESET_OTG_OC_COUNT_MS	100
+static void reset_otg_oc_count_work(struct work_struct *work)
+{
+	struct smb135x_chg *chip =
+		container_of(work, struct smb135x_chg,
+				reset_otg_oc_count_work.work);
+
+	mutex_lock(&chip->otg_oc_count_lock);
+	pr_debug("It has been %dmS since OverCurrent interrupt resetting the count\n",
+			RESET_OTG_OC_COUNT_MS);
+	chip->otg_oc_count = 0;
+	mutex_unlock(&chip->otg_oc_count_lock);
+}
+
+#define MAX_OTG_RETRY	3
+static int otg_oc_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	int rc;
+
+	mutex_lock(&chip->otg_oc_count_lock);
+	cancel_delayed_work_sync(&chip->reset_otg_oc_count_work);
+	++chip->otg_oc_count;
+	if (chip->otg_oc_count < MAX_OTG_RETRY) {
+		rc = smb135x_chg_otg_enable(chip);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't enable  OTG mode rc=%d\n",
+				rc);
+	} else {
+		pr_warn_ratelimited("Tried enabling OTG %d times, the USB slave is nonconformant.\n",
+			chip->otg_oc_count);
+	}
+
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+	schedule_delayed_work(&chip->reset_otg_oc_count_work,
+			msecs_to_jiffies(RESET_OTG_OC_COUNT_MS));
+	mutex_unlock(&chip->otg_oc_count_lock);
+	return 0;
+}
+
+static int handle_dc_removal(struct smb135x_chg *chip)
+{
+	union power_supply_propval prop;
+
+	if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS) {
+		cancel_delayed_work_sync(&chip->wireless_insertion_work);
+		smb135x_path_suspend(chip, DC, CURRENT, true);
+	}
+	if (chip->dc_psy_type != -EINVAL) {
+		prop.intval = chip->dc_present;
+		power_supply_set_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_ONLINE, &prop);
+	}
+	return 0;
+}
+
+#define DCIN_UNSUSPEND_DELAY_MS		1000
+static int handle_dc_insertion(struct smb135x_chg *chip)
+{
+	union power_supply_propval prop;
+
+	if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS)
+		schedule_delayed_work(&chip->wireless_insertion_work,
+			msecs_to_jiffies(DCIN_UNSUSPEND_DELAY_MS));
+	if (chip->dc_psy_type != -EINVAL) {
+		prop.intval = chip->dc_present;
+		power_supply_set_property(chip->dc_psy,
+				POWER_SUPPLY_PROP_ONLINE, &prop);
+	}
+	return 0;
+}
+/**
+ * dcin_uv_handler() - called when the dc voltage crosses the uv threshold
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating whether dc voltage is uv
+ */
+static int dcin_uv_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	/*
+	 * rt_stat indicates if dc is undervolted. If so dc_present
+	 * should be marked removed
+	 */
+	bool dc_present = !rt_stat;
+
+	pr_debug("chip->dc_present = %d dc_present = %d\n",
+			chip->dc_present, dc_present);
+
+	if (chip->dc_present && !dc_present) {
+		/* dc removed */
+		chip->dc_present = dc_present;
+		handle_dc_removal(chip);
+	}
+
+	if (!chip->dc_present && dc_present) {
+		/* dc inserted */
+		chip->dc_present = dc_present;
+		handle_dc_insertion(chip);
+	}
+
+	return 0;
+}
+
+static int dcin_ov_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	/*
+	 * rt_stat indicates if dc is overvolted. If so dc_present
+	 * should be marked removed
+	 */
+	bool dc_present = !rt_stat;
+
+	pr_debug("chip->dc_present = %d dc_present = %d\n",
+			chip->dc_present, dc_present);
+
+	chip->dc_ov = !!rt_stat;
+
+	if (chip->dc_present && !dc_present) {
+		/* dc removed */
+		chip->dc_present = dc_present;
+		handle_dc_removal(chip);
+	}
+
+	if (!chip->dc_present && dc_present) {
+		/* dc inserted */
+		chip->dc_present = dc_present;
+		handle_dc_insertion(chip);
+	}
+	return 0;
+}
+
+static int handle_usb_removal(struct smb135x_chg *chip)
+{
+	union power_supply_propval pval = {0,};
+
+	if (chip->usb_psy) {
+		cancel_delayed_work_sync(&chip->hvdcp_det_work);
+		pm_relax(chip->dev);
+		pr_debug("setting usb psy type = %d\n",
+				POWER_SUPPLY_TYPE_UNKNOWN);
+		pval.intval = POWER_SUPPLY_TYPE_UNKNOWN;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &pval);
+
+		pr_debug("setting usb psy present = %d\n", chip->usb_present);
+		pval.intval = chip->usb_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&pval);
+
+		pr_debug("Setting usb psy dp=r dm=r\n");
+		pval.intval = POWER_SUPPLY_DP_DM_DPR_DMR;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_DP_DM,
+				&pval);
+	}
+	return 0;
+}
+
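+/*
+ * Force the charger to re-run automatic power source detection (APSD) by
+ * toggling APSD off and on while briefly restricting the adapter allowance
+ * to 9V only, then restoring the 5V-9V allowance.
+ */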
+static int rerun_apsd(struct smb135x_chg *chip)
+{
+	int rc;
+
+	pr_debug("Reruning APSD\nDisabling APSD\n");
+	rc = smb135x_masked_write(chip, CFG_11_REG, AUTO_SRC_DET_EN_BIT, 0);
+	if (rc) {
+		dev_err(chip->dev, "Couldn't Disable APSD rc=%d\n", rc);
+		return rc;
+	}
+	pr_debug("Allow only 9V chargers\n");
+	rc = smb135x_masked_write(chip, CFG_C_REG,
+			USBIN_ADAPTER_ALLOWANCE_MASK, ALLOW_9V_ONLY);
+	if (rc)
+		dev_err(chip->dev, "Couldn't Allow 9V rc=%d\n", rc);
+	pr_debug("Enabling APSD\n");
+	rc = smb135x_masked_write(chip, CFG_11_REG, AUTO_SRC_DET_EN_BIT, 1);
+	if (rc)
+		dev_err(chip->dev, "Couldn't Enable APSD rc=%d\n", rc);
+	pr_debug("Allow 5V-9V\n");
+	rc = smb135x_masked_write(chip, CFG_C_REG,
+			USBIN_ADAPTER_ALLOWANCE_MASK, ALLOW_5V_TO_9V);
+	if (rc)
+		dev_err(chip->dev, "Couldn't Allow 5V-9V rc=%d\n", rc);
+	return rc;
+}
+
+static void smb135x_hvdcp_det_work(struct work_struct *work)
+{
+	int rc;
+	u8 reg;
+	struct smb135x_chg *chip = container_of(work, struct smb135x_chg,
+							hvdcp_det_work.work);
+	union power_supply_propval pval = {0,};
+
+	rc = smb135x_read(chip, STATUS_7_REG, &reg);
+	if (rc) {
+		pr_err("Couldn't read STATUS_7_REG rc == %d\n", rc);
+		goto end;
+	}
+	pr_debug("STATUS_7_REG = 0x%02X\n", reg);
+
+	if (reg) {
+		pr_debug("HVDCP detected; notifying USB PSY\n");
+		pval.intval = POWER_SUPPLY_TYPE_USB_HVDCP;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &pval);
+	}
+end:
+	pm_relax(chip->dev);
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static int handle_usb_insertion(struct smb135x_chg *chip)
+{
+	u8 reg;
+	int rc;
+	char *usb_type_name = "null";
+	enum power_supply_type usb_supply_type;
+	union power_supply_propval pval = {0,};
+
+	/* usb inserted */
+	rc = smb135x_read(chip, STATUS_5_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+		return rc;
+	}
+	/*
+	 * Report the charger type as UNKNOWN if the
+	 * apsd-fail flag is set. This notifies the USB driver
+	 * to initiate a s/w based charger type detection.
+	 */
+	if (chip->workaround_flags & WRKARND_APSD_FAIL)
+		reg = 0;
+
+	usb_type_name = get_usb_type_name(reg);
+	usb_supply_type = get_usb_supply_type(reg);
+	pr_debug("inserted %s, usb psy type = %d stat_5 = 0x%02x apsd_rerun = %d\n",
+			usb_type_name, usb_supply_type, reg, chip->apsd_rerun);
+
+	if (chip->batt_present && !chip->apsd_rerun && chip->usb_psy) {
+		if (usb_supply_type == POWER_SUPPLY_TYPE_USB) {
+			pr_debug("Setting usb psy dp=f dm=f SDP and rerun\n");
+			pval.intval = POWER_SUPPLY_DP_DM_DPF_DMF;
+			power_supply_set_property(chip->usb_psy,
+					POWER_SUPPLY_PROP_DP_DM,
+					&pval);
+			chip->apsd_rerun = true;
+			rerun_apsd(chip);
+			/* rising edge of src detect will happen in a few ms */
+			return 0;
+		}
+
+		pr_debug("Set usb psy dp=f dm=f DCP and no rerun\n");
+		pval.intval = POWER_SUPPLY_DP_DM_DPF_DMF;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_DP_DM,
+				&pval);
+	}
+
+	if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP) {
+		pr_debug("schedule hvdcp detection worker\n");
+		pm_stay_awake(chip->dev);
+		schedule_delayed_work(&chip->hvdcp_det_work,
+					msecs_to_jiffies(HVDCP_NOTIFY_MS));
+	}
+
+	if (chip->usb_psy) {
+		if (chip->bms_controlled_charging) {
+			/* enable charging on USB insertion */
+			rc = smb135x_charging_enable(chip, true);
+			if (rc < 0)
+				dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+						rc);
+		}
+		pr_debug("setting usb psy type = %d\n", usb_supply_type);
+		pval.intval = usb_supply_type;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_TYPE, &pval);
+
+		pr_debug("setting usb psy present = %d\n", chip->usb_present);
+		pval.intval = chip->usb_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_PRESENT,
+				&pval);
+	}
+	chip->apsd_rerun = false;
+	return 0;
+}
+
+/**
+ * usbin_uv_handler()
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating chg insertion/removal
+ */
+static int usbin_uv_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	/*
+	 * rt_stat indicates if usb is undervolted
+	 */
+	bool usb_present = !rt_stat;
+
+	pr_debug("chip->usb_present = %d usb_present = %d\n",
+			chip->usb_present, usb_present);
+
+	return 0;
+}
+
+static int usbin_ov_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	union power_supply_propval pval = {0, };
+	/*
+	 * rt_stat indicates if usb is overvolted. If so usb_present
+	 * should be marked removed
+	 */
+	bool usb_present = !rt_stat;
+
+	pr_debug("chip->usb_present = %d usb_present = %d\n",
+			chip->usb_present, usb_present);
+	if (chip->usb_present && !usb_present) {
+		/* USB removed */
+		chip->usb_present = usb_present;
+		handle_usb_removal(chip);
+	} else if (!chip->usb_present && usb_present) {
+		/* USB inserted */
+		chip->usb_present = usb_present;
+		handle_usb_insertion(chip);
+	}
+
+	if (chip->usb_psy) {
+		pval.intval = rt_stat ? POWER_SUPPLY_HEALTH_OVERVOLTAGE
+					: POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_HEALTH, &pval);
+	}
+
+	return 0;
+}
+
+/**
+ * src_detect_handler() - this is called on rising edge when USB
+ *			charger type is detected and on falling edge when
+ *			USB voltage falls below the coarse detect voltage
+ *			(1V), use it for handling USB charger insertion
+ *			and removal.
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating chg insertion/removal
+ */
+static int src_detect_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	bool usb_present = !!rt_stat;
+
+	pr_debug("chip->usb_present = %d usb_present = %d\n",
+			chip->usb_present, usb_present);
+
+	if (!chip->usb_present && usb_present) {
+		/* USB inserted */
+		chip->usb_present = usb_present;
+		handle_usb_insertion(chip);
+	} else if (usb_present && chip->apsd_rerun) {
+		handle_usb_insertion(chip);
+	} else if (chip->usb_present && !usb_present) {
+		chip->usb_present = !chip->usb_present;
+		handle_usb_removal(chip);
+	}
+
+	return 0;
+}
+
+static int chg_inhibit_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+	/*
+	 * charger is inserted when the battery voltage is high
+	 * so h/w won't start charging just yet. Treat this as
+	 * battery full
+	 */
+	pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+	if (!chip->inhibit_disabled)
+		chip->chg_done_batt_full = !!rt_stat;
+	return 0;
+}
+
+struct smb_irq_info {
+	const char		*name;
+	int			(*smb_irq)(struct smb135x_chg *chip,
+							u8 rt_stat);
+	int			high;
+	int			low;
+};
+
+struct irq_handler_info {
+	u8			stat_reg;
+	u8			val;
+	u8			prev_val;
+	struct smb_irq_info	irq_info[4];
+};
+
+static struct irq_handler_info handlers[] = {
+	{IRQ_A_REG, 0, 0,
+		{
+			{
+				.name		= "cold_soft",
+				.smb_irq	= cold_soft_handler,
+			},
+			{
+				.name		= "hot_soft",
+				.smb_irq	= hot_soft_handler,
+			},
+			{
+				.name		= "cold_hard",
+				.smb_irq	= cold_hard_handler,
+			},
+			{
+				.name		= "hot_hard",
+				.smb_irq	= hot_hard_handler,
+			},
+		},
+	},
+	{IRQ_B_REG, 0, 0,
+		{
+			{
+				.name		= "chg_hot",
+				.smb_irq	= chg_hot_handler,
+			},
+			{
+				.name		= "vbat_low",
+				.smb_irq	= vbat_low_handler,
+			},
+			{
+				.name		= "battery_missing",
+				.smb_irq	= battery_missing_handler,
+			},
+			{
+				.name		= "battery_missing",
+				.smb_irq	= battery_missing_handler,
+			},
+		},
+	},
+	{IRQ_C_REG, 0, 0,
+		{
+			{
+				.name		= "chg_term",
+				.smb_irq	= chg_term_handler,
+			},
+			{
+				.name		= "taper",
+				.smb_irq	= taper_handler,
+			},
+			{
+				.name		= "recharge",
+				.smb_irq	= recharge_handler,
+			},
+			{
+				.name		= "fast_chg",
+				.smb_irq	= fast_chg_handler,
+			},
+		},
+	},
+	{IRQ_D_REG, 0, 0,
+		{
+			{
+				.name		= "prechg_timeout",
+			},
+			{
+				.name		= "safety_timeout",
+				.smb_irq	= safety_timeout_handler,
+			},
+			{
+				.name		= "aicl_done",
+			},
+			{
+				.name		= "battery_ov",
+			},
+		},
+	},
+	{IRQ_E_REG, 0, 0,
+		{
+			{
+				.name		= "usbin_uv",
+				.smb_irq	= usbin_uv_handler,
+			},
+			{
+				.name		= "usbin_ov",
+				.smb_irq	= usbin_ov_handler,
+			},
+			{
+				.name		= "dcin_uv",
+				.smb_irq	= dcin_uv_handler,
+			},
+			{
+				.name		= "dcin_ov",
+				.smb_irq	= dcin_ov_handler,
+			},
+		},
+	},
+	{IRQ_F_REG, 0, 0,
+		{
+			{
+				.name		= "power_ok",
+				.smb_irq	= power_ok_handler,
+			},
+			{
+				.name		= "rid",
+				.smb_irq	= rid_handler,
+			},
+			{
+				.name		= "otg_fail",
+			},
+			{
+				.name		= "otg_oc",
+				.smb_irq	= otg_oc_handler,
+			},
+		},
+	},
+	{IRQ_G_REG, 0, 0,
+		{
+			{
+				.name		= "chg_inhibit",
+				.smb_irq	= chg_inhibit_handler,
+			},
+			{
+				.name		= "chg_error",
+			},
+			{
+				.name		= "wd_timeout",
+			},
+			{
+				.name		= "src_detect",
+				.smb_irq	= src_detect_handler,
+			},
+		},
+	},
+};
+
+static int smb135x_irq_read(struct smb135x_chg *chip)
+{
+	int rc, i;
+
+	/*
+	 * When the dcin path is suspended, the irq triggered status is not
+	 * cleared, causing an interrupt storm. To prevent this, unsuspend the
+	 * dcin path while reading the interrupts and restore its state after.
+	 */
+	mutex_lock(&chip->path_suspend_lock);
+
+	if (chip->dc_suspended)
+		__smb135x_dc_suspend(chip, false);
+
+	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+		rc = smb135x_read(chip, handlers[i].stat_reg,
+						&handlers[i].val);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't read %d rc = %d\n",
+					handlers[i].stat_reg, rc);
+			handlers[i].val = 0;
+			continue;
+		}
+	}
+
+	if (chip->dc_suspended)
+		__smb135x_dc_suspend(chip, true);
+
+	mutex_unlock(&chip->path_suspend_lock);
+
+	return rc;
+}
+
+#define IRQ_LATCHED_MASK	0x02
+#define IRQ_STATUS_MASK		0x01
+#define BITS_PER_IRQ		2
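+/*
+ * Each IRQ status register packs four interrupt sources, two bits each: a
+ * latched "triggered" bit and a real-time status bit. A handler is invoked
+ * whenever a source has triggered or its real-time status has changed.
+ */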
+static irqreturn_t smb135x_chg_stat_handler(int irq, void *dev_id)
+{
+	struct smb135x_chg *chip = dev_id;
+	int i, j;
+	u8 triggered;
+	u8 changed;
+	u8 rt_stat, prev_rt_stat;
+	int rc;
+	int handler_count = 0;
+
+	mutex_lock(&chip->irq_complete);
+	chip->irq_waiting = true;
+	if (!chip->resume_completed) {
+		dev_dbg(chip->dev, "IRQ triggered before device-resume\n");
+		disable_irq_nosync(irq);
+		mutex_unlock(&chip->irq_complete);
+		return IRQ_HANDLED;
+	}
+	chip->irq_waiting = false;
+
+	smb135x_irq_read(chip);
+	for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+		for (j = 0; j < ARRAY_SIZE(handlers[i].irq_info); j++) {
+			triggered = handlers[i].val
+			       & (IRQ_LATCHED_MASK << (j * BITS_PER_IRQ));
+			rt_stat = handlers[i].val
+				& (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+			prev_rt_stat = handlers[i].prev_val
+				& (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+			changed = prev_rt_stat ^ rt_stat;
+
+			if (triggered || changed)
+				rt_stat ? handlers[i].irq_info[j].high++ :
+						handlers[i].irq_info[j].low++;
+
+			if ((triggered || changed)
+				&& handlers[i].irq_info[j].smb_irq != NULL) {
+				handler_count++;
+				rc = handlers[i].irq_info[j].smb_irq(chip,
+								rt_stat);
+				if (rc < 0)
+					dev_err(chip->dev,
+						"Couldn't handle %d irq for reg 0x%02x rc = %d\n",
+						j, handlers[i].stat_reg, rc);
+			}
+		}
+		handlers[i].prev_val = handlers[i].val;
+	}
+
+	pr_debug("handler count = %d\n", handler_count);
+	if (handler_count) {
+		pr_debug("batt psy changed\n");
+		power_supply_changed(chip->batt_psy);
+		if (chip->usb_psy) {
+			pr_debug("usb psy changed\n");
+			power_supply_changed(chip->usb_psy);
+		}
+		if (chip->dc_psy_type != -EINVAL) {
+			pr_debug("dc psy changed\n");
+			power_supply_changed(chip->dc_psy);
+		}
+	}
+
+	mutex_unlock(&chip->irq_complete);
+
+	return IRQ_HANDLED;
+}
+
+#define LAST_CNFG_REG	0x1F
+static int show_cnfg_regs(struct seq_file *m, void *data)
+{
+	struct smb135x_chg *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int cnfg_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb135x_chg *chip = inode->i_private;
+
+	return single_open(file, show_cnfg_regs, chip);
+}
+
+static const struct file_operations cnfg_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= cnfg_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#define FIRST_CMD_REG	0x40
+#define LAST_CMD_REG	0x42
+static int show_cmd_regs(struct seq_file *m, void *data)
+{
+	struct smb135x_chg *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int cmd_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb135x_chg *chip = inode->i_private;
+
+	return single_open(file, show_cmd_regs, chip);
+}
+
+static const struct file_operations cmd_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= cmd_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#define FIRST_STATUS_REG	0x46
+#define LAST_STATUS_REG		0x56
+static int show_status_regs(struct seq_file *m, void *data)
+{
+	struct smb135x_chg *chip = m->private;
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (!rc)
+			seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	return 0;
+}
+
+static int status_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb135x_chg *chip = inode->i_private;
+
+	return single_open(file, show_status_regs, chip);
+}
+
+static const struct file_operations status_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= status_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int show_irq_count(struct seq_file *m, void *data)
+{
+	int i, j, total = 0;
+
+	for (i = 0; i < ARRAY_SIZE(handlers); i++)
+		for (j = 0; j < ARRAY_SIZE(handlers[i].irq_info); j++) {
+			seq_printf(m, "%s=%d\t(high=%d low=%d)\n",
+						handlers[i].irq_info[j].name,
+						handlers[i].irq_info[j].high
+						+ handlers[i].irq_info[j].low,
+						handlers[i].irq_info[j].high,
+						handlers[i].irq_info[j].low);
+			total += (handlers[i].irq_info[j].high
+					+ handlers[i].irq_info[j].low);
+		}
+
+	seq_printf(m, "\n\tTotal = %d\n", total);
+
+	return 0;
+}
+
+static int irq_count_debugfs_open(struct inode *inode, struct file *file)
+{
+	struct smb135x_chg *chip = inode->i_private;
+
+	return single_open(file, show_irq_count, chip);
+}
+
+static const struct file_operations irq_count_debugfs_ops = {
+	.owner		= THIS_MODULE,
+	.open		= irq_count_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int get_reg(void *data, u64 *val)
+{
+	struct smb135x_chg *chip = data;
+	int rc;
+	u8 temp;
+
+	rc = smb135x_read(chip, chip->peek_poke_address, &temp);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't read reg %x rc = %d\n",
+			chip->peek_poke_address, rc);
+		return -EAGAIN;
+	}
+	*val = temp;
+	return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+	struct smb135x_chg *chip = data;
+	int rc;
+	u8 temp;
+
+	temp = (u8) val;
+	rc = smb135x_write(chip, chip->peek_poke_address, temp);
+	if (rc < 0) {
+		dev_err(chip->dev,
+			"Couldn't write 0x%02x to 0x%02x rc = %d\n",
+			temp, chip->peek_poke_address, rc);
+		return -EAGAIN;
+	}
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n");
+
+static int force_irq_set(void *data, u64 val)
+{
+	struct smb135x_chg *chip = data;
+
+	smb135x_chg_stat_handler(chip->client->irq, data);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_irq_ops, NULL, force_irq_set, "0x%02llx\n");
+
+static int force_rechg_set(void *data, u64 val)
+{
+	int rc = 0;
+	struct smb135x_chg *chip = data;
+
+	if (!chip->chg_enabled) {
+		pr_debug("Charging Disabled force recharge not allowed\n");
+		return -EINVAL;
+	}
+
+	if (!chip->inhibit_disabled) {
+		rc = smb135x_masked_write(chip, CFG_14_REG, EN_CHG_INHIBIT_BIT,
+					0);
+		if (rc)
+			dev_err(chip->dev,
+				"Couldn't disable charge-inhibit rc=%d\n", rc);
+
+		/* delay for charge-inhibit to take effect */
+		msleep(500);
+	}
+
+	rc |= smb135x_charging(chip, false);
+	rc |= smb135x_charging(chip, true);
+
+	if (!chip->inhibit_disabled) {
+		rc |= smb135x_masked_write(chip, CFG_14_REG,
+				EN_CHG_INHIBIT_BIT, EN_CHG_INHIBIT_BIT);
+		if (rc)
+			dev_err(chip->dev,
+				"Couldn't enable charge-inhibit rc=%d\n", rc);
+	}
+
+	return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_rechg_ops, NULL, force_rechg_set, "0x%02llx\n");
+
+#ifdef DEBUG
+static void dump_regs(struct smb135x_chg *chip)
+{
+	int rc;
+	u8 reg;
+	u8 addr;
+
+	for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+					addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+					addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+
+	for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+		rc = smb135x_read(chip, addr, &reg);
+		if (rc < 0)
+			dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+					addr, rc);
+		else
+			pr_debug("0x%02x = 0x%02x\n", addr, reg);
+	}
+}
+#else
+static void dump_regs(struct smb135x_chg *chip)
+{
+}
+#endif
+static int determine_initial_status(struct smb135x_chg *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+	u8 reg;
+
+	/*
+	 * It is okay to read the interrupt status here since
+	 * interrupts aren't requested yet. Reading the interrupt status
+	 * clears the interrupt, so be careful to read the interrupt
+	 * status only in interrupt handling code.
+	 */
+
+	chip->batt_present = true;
+	rc = smb135x_read(chip, IRQ_B_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read irq b rc = %d\n", rc);
+		return rc;
+	}
+	if (reg & IRQ_B_BATT_TERMINAL_BIT || reg & IRQ_B_BATT_MISSING_BIT)
+		chip->batt_present = false;
+	rc = smb135x_read(chip, STATUS_4_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read status 4 rc = %d\n", rc);
+		return rc;
+	}
+	/* treat battery gone if less than 2V */
+	if (reg & BATT_LESS_THAN_2V)
+		chip->batt_present = false;
+
+	rc = smb135x_read(chip, IRQ_A_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read irq A rc = %d\n", rc);
+		return rc;
+	}
+
+	if (reg & IRQ_A_HOT_HARD_BIT)
+		chip->batt_hot = true;
+	if (reg & IRQ_A_COLD_HARD_BIT)
+		chip->batt_cold = true;
+	if (reg & IRQ_A_HOT_SOFT_BIT)
+		chip->batt_warm = true;
+	if (reg & IRQ_A_COLD_SOFT_BIT)
+		chip->batt_cool = true;
+
+	rc = smb135x_read(chip, IRQ_C_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read irq C rc = %d\n", rc);
+		return rc;
+	}
+	if (reg & IRQ_C_TERM_BIT)
+		chip->chg_done_batt_full = true;
+
+	rc = smb135x_read(chip, IRQ_E_REG, &reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't read irq E rc = %d\n", rc);
+		return rc;
+	}
+	chip->usb_present = !(reg & IRQ_E_USB_OV_BIT)
+				&& !(reg & IRQ_E_USB_UV_BIT);
+	chip->dc_present = !(reg & IRQ_E_DC_OV_BIT) && !(reg & IRQ_E_DC_UV_BIT);
+
+	if (chip->usb_present)
+		handle_usb_insertion(chip);
+	else
+		handle_usb_removal(chip);
+
+	if (chip->dc_psy_type != -EINVAL) {
+		if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS) {
+			/*
+			 * put the dc path in suspend state if it is powered
+			 * by wireless charger
+			 */
+			if (chip->dc_present)
+				smb135x_path_suspend(chip, DC, CURRENT, false);
+			else
+				smb135x_path_suspend(chip, DC, CURRENT, true);
+		}
+	}
+
+	chip->usb_slave_present = is_usb_slave_present(chip);
+	if (chip->usb_psy && !chip->id_line_not_connected) {
+		pr_debug("setting usb psy usb_otg = %d\n",
+				chip->usb_slave_present);
+		pval.intval = chip->usb_slave_present;
+		power_supply_set_property(chip->usb_psy,
+				POWER_SUPPLY_PROP_USB_OTG, &pval);
+	}
+	return 0;
+}
+
+static int smb135x_hw_init(struct smb135x_chg *chip)
+{
+	int rc;
+	int i;
+	u8 reg, mask;
+
+	if (chip->pinctrl_state_name) {
+		chip->smb_pinctrl = pinctrl_get_select(chip->dev,
+						chip->pinctrl_state_name);
+		if (IS_ERR(chip->smb_pinctrl)) {
+			pr_err("Could not get/set %s pinctrl state rc = %ld\n",
+						chip->pinctrl_state_name,
+						PTR_ERR(chip->smb_pinctrl));
+			return PTR_ERR(chip->smb_pinctrl);
+		}
+	}
+
+	if (chip->therm_bias_vreg) {
+		rc = regulator_enable(chip->therm_bias_vreg);
+		if (rc) {
+			pr_err("Couldn't enable therm-bias rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	/*
+	 * Enable the USB data line pull-up regulator; this is needed for the
+	 * D+ line to be at the proper voltage for HVDCP charger detection.
+	 */
+	if (chip->usb_pullup_vreg) {
+		rc = regulator_enable(chip->usb_pullup_vreg);
+		if (rc) {
+			pr_err("Unable to enable data line pull-up regulator rc=%d\n",
+					rc);
+			if (chip->therm_bias_vreg)
+				regulator_disable(chip->therm_bias_vreg);
+			return rc;
+		}
+	}
+
+	rc = smb135x_enable_volatile_writes(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't configure for volatile rc = %d\n",
+				rc);
+		goto free_regulator;
+	}
+
+	/*
+	 * force using the current from the register, i.e. ignore the auto
+	 * power source detect (APSD) mA ratings
+	 */
+	mask = USE_REGISTER_FOR_CURRENT;
+
+	if (chip->workaround_flags & WRKARND_USB100_BIT)
+		reg = 0;
+	else
+		/* this ignores APSD results */
+		reg = USE_REGISTER_FOR_CURRENT;
+
+	rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT, mask, reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc);
+		goto free_regulator;
+	}
+
+	/* set bit 0 = 100mA bit 1 = 500mA and set register control */
+	rc = smb135x_masked_write(chip, CFG_E_REG,
+			POLARITY_100_500_BIT | USB_CTRL_BY_PIN_BIT,
+			POLARITY_100_500_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set usbin cfg rc=%d\n", rc);
+		goto free_regulator;
+	}
+
+	/*
+	 * set chg en by cmd register, set chg en by writing bit 1,
+	 * enable auto pre to fast, enable current termination, enable
+	 * auto recharge, enable chg inhibition based on the dt flag
+	 */
+	if (chip->inhibit_disabled)
+		reg = 0;
+	else
+		reg = EN_CHG_INHIBIT_BIT;
+
+	rc = smb135x_masked_write(chip, CFG_14_REG,
+			CHG_EN_BY_PIN_BIT | CHG_EN_ACTIVE_LOW_BIT
+			| PRE_TO_FAST_REQ_CMD_BIT | DISABLE_AUTO_RECHARGE_BIT
+			| EN_CHG_INHIBIT_BIT, reg);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set cfg 14 rc=%d\n", rc);
+		goto free_regulator;
+	}
+
+	/* control USB suspend via command bits */
+	rc = smb135x_masked_write(chip, USBIN_DCIN_CFG_REG,
+		USBIN_SUSPEND_VIA_COMMAND_BIT, USBIN_SUSPEND_VIA_COMMAND_BIT);
+
+	/* set the float voltage */
+	if (chip->vfloat_mv != -EINVAL) {
+		rc = smb135x_float_voltage_set(chip, chip->vfloat_mv);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't set float voltage rc = %d\n", rc);
+			goto free_regulator;
+		}
+	}
+
+	/* set iterm */
+	if (chip->iterm_ma != -EINVAL) {
+		if (chip->iterm_disabled) {
+			dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n");
+			rc = -EINVAL;
+			goto free_regulator;
+		} else {
+			if (chip->iterm_ma <= 50)
+				reg = CHG_ITERM_50MA;
+			else if (chip->iterm_ma <= 100)
+				reg = CHG_ITERM_100MA;
+			else if (chip->iterm_ma <= 150)
+				reg = CHG_ITERM_150MA;
+			else if (chip->iterm_ma <= 200)
+				reg = CHG_ITERM_200MA;
+			else if (chip->iterm_ma <= 250)
+				reg = CHG_ITERM_250MA;
+			else if (chip->iterm_ma <= 300)
+				reg = CHG_ITERM_300MA;
+			else if (chip->iterm_ma <= 500)
+				reg = CHG_ITERM_500MA;
+			else
+				reg = CHG_ITERM_600MA;
+
+			rc = smb135x_masked_write(chip, CFG_3_REG,
+							CHG_ITERM_MASK, reg);
+			if (rc) {
+				dev_err(chip->dev,
+					"Couldn't set iterm rc = %d\n", rc);
+				goto free_regulator;
+			}
+
+			rc = smb135x_masked_write(chip, CFG_14_REG,
+						DISABLE_CURRENT_TERM_BIT, 0);
+			if (rc) {
+				dev_err(chip->dev,
+					"Couldn't enable iterm rc = %d\n", rc);
+				goto free_regulator;
+			}
+		}
+	} else if (chip->iterm_disabled) {
+		rc = smb135x_masked_write(chip, CFG_14_REG,
+					DISABLE_CURRENT_TERM_BIT,
+					DISABLE_CURRENT_TERM_BIT);
+		if (rc) {
+			dev_err(chip->dev, "Couldn't set iterm rc = %d\n",
+								rc);
+			goto free_regulator;
+		}
+	}
+
+	/* set the charge safety timer */
+	if (chip->safety_time != -EINVAL) {
+		if (chip->safety_time == 0) {
+			/* safety timer disabled */
+			reg = 1 << SAFETY_TIME_EN_SHIFT;
+			rc = smb135x_masked_write(chip, CFG_16_REG,
+						SAFETY_TIME_EN_BIT, reg);
+			if (rc < 0) {
+				dev_err(chip->dev,
+				"Couldn't disable safety timer rc = %d\n",
+				rc);
+				goto free_regulator;
+			}
+		} else {
+			for (i = 0; i < ARRAY_SIZE(chg_time); i++) {
+				if (chip->safety_time <= chg_time[i]) {
+					reg = i << SAFETY_TIME_MINUTES_SHIFT;
+					break;
+				}
+			}
+			rc = smb135x_masked_write(chip, CFG_16_REG,
+				SAFETY_TIME_EN_BIT | SAFETY_TIME_MINUTES_MASK,
+				reg);
+			if (rc < 0) {
+				dev_err(chip->dev,
+					"Couldn't set safety timer rc = %d\n",
+					rc);
+				goto free_regulator;
+			}
+		}
+	}
+
+	/* battery missing detection */
+	rc = smb135x_masked_write(chip, CFG_19_REG,
+			BATT_MISSING_ALGO_BIT | BATT_MISSING_THERM_BIT,
+			chip->bmd_algo_disabled ? BATT_MISSING_THERM_BIT :
+						BATT_MISSING_ALGO_BIT);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set batt_missing config = %d\n",
+									rc);
+		goto free_regulator;
+	}
+
+	/* set maximum fastchg current */
+	if (chip->fastchg_ma != -EINVAL) {
+		rc = smb135x_set_fastchg_current(chip, chip->fastchg_ma);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set fastchg current = %d\n",
+									rc);
+			goto free_regulator;
+		}
+	}
+
+	if (chip->usb_pullup_vreg) {
+		/* enable 9V HVDCP adapter support */
+		rc = smb135x_masked_write(chip, CFG_E_REG, HVDCP_5_9_BIT,
+					HVDCP_5_9_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev,
+				"Couldn't request for 5 or 9V rc=%d\n", rc);
+			goto free_regulator;
+		}
+	}
+
+	if (chip->gamma_setting) {
+		rc = smb135x_masked_write(chip, CFG_1B_REG, COLD_HARD_MASK,
+				chip->gamma_setting[0] << COLD_HARD_SHIFT);
+
+		rc |= smb135x_masked_write(chip, CFG_1B_REG, HOT_HARD_MASK,
+				chip->gamma_setting[1] << HOT_HARD_SHIFT);
+
+		rc |= smb135x_masked_write(chip, CFG_1B_REG, COLD_SOFT_MASK,
+				chip->gamma_setting[2] << COLD_SOFT_SHIFT);
+
+		rc |= smb135x_masked_write(chip, CFG_1B_REG, HOT_SOFT_MASK,
+				chip->gamma_setting[3] << HOT_SOFT_SHIFT);
+		if (rc < 0)
+			goto free_regulator;
+	}
+
+	__smb135x_charging(chip, chip->chg_enabled);
+
+	/* interrupt enabling - active low */
+	if (chip->client->irq) {
+		mask = CHG_STAT_IRQ_ONLY_BIT | CHG_STAT_ACTIVE_HIGH_BIT
+			| CHG_STAT_DISABLE_BIT;
+		reg = CHG_STAT_IRQ_ONLY_BIT;
+		rc = smb135x_masked_write(chip, CFG_17_REG, mask, reg);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set irq config rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+
+		/* enabling only interesting interrupts */
+		rc = smb135x_write(chip, IRQ_CFG_REG,
+			IRQ_BAT_HOT_COLD_HARD_BIT
+			| IRQ_BAT_HOT_COLD_SOFT_BIT
+			| IRQ_OTG_OVER_CURRENT_BIT
+			| IRQ_INTERNAL_TEMPERATURE_BIT
+			| IRQ_USBIN_UV_BIT);
+
+		rc |= smb135x_write(chip, IRQ2_CFG_REG,
+			IRQ2_SAFETY_TIMER_BIT
+			| IRQ2_CHG_ERR_BIT
+			| IRQ2_CHG_PHASE_CHANGE_BIT
+			| IRQ2_POWER_OK_BIT
+			| IRQ2_BATT_MISSING_BIT
+			| IRQ2_VBAT_LOW_BIT);
+
+		rc |= smb135x_write(chip, IRQ3_CFG_REG, IRQ3_SRC_DETECT_BIT
+				| IRQ3_DCIN_UV_BIT | IRQ3_RID_DETECT_BIT);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set irq enable rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+	}
+
+	/* resume threshold */
+	if (chip->resume_delta_mv != -EINVAL)
+		smb135x_set_resume_threshold(chip, chip->resume_delta_mv);
+
+	/* DC path current settings */
+	if (chip->dc_psy_type != -EINVAL) {
+		rc = smb135x_set_dc_chg_current(chip, chip->dc_psy_ma);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't set dc charge current rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+	}
+
+	/*
+	 * On some devices the battery is powered via external sources which
+	 * could raise its voltage above the float voltage. smb135x chips go
+	 * into reverse boost in such a situation, and the workaround is to
+	 * disable float voltage compensation (note that the battery will
+	 * appear hot/cold when powered via an external source).
+	 */
+
+	if (chip->soft_vfloat_comp_disabled) {
+		mask = HOT_SOFT_VFLOAT_COMP_EN_BIT
+				| COLD_SOFT_VFLOAT_COMP_EN_BIT;
+		rc = smb135x_masked_write(chip, CFG_1A_REG, mask, 0);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+	}
+
+	if (chip->soft_current_comp_disabled) {
+		mask = HOT_SOFT_CURRENT_COMP_EN_BIT
+				| COLD_SOFT_CURRENT_COMP_EN_BIT;
+		rc = smb135x_masked_write(chip, CFG_1A_REG, mask, 0);
+		if (rc < 0) {
+			dev_err(chip->dev, "Couldn't disable soft current rc = %d\n",
+					rc);
+			goto free_regulator;
+		}
+	}
+
+	/*
+	 * Command mode for OTG control. This gives us RID interrupts but keeps
+	 * enabling the 5V OTG via i2c register control
+	 */
+	rc = smb135x_masked_write(chip, USBIN_OTG_REG, OTG_CNFG_MASK,
+			OTG_CNFG_COMMAND_CTRL);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't write to otg cfg reg rc = %d\n",
+				rc);
+		goto free_regulator;
+	}
+	return 0;
+
+free_regulator:
+	if (chip->therm_bias_vreg)
+		regulator_disable(chip->therm_bias_vreg);
+	if (chip->usb_pullup_vreg)
+		regulator_disable(chip->usb_pullup_vreg);
+	return rc;
+}
+
+static const struct of_device_id smb135x_match_table[] = {
+	{
+		.compatible	= "qcom,smb1356-charger",
+		.data		= &version_data[V_SMB1356],
+	},
+	{
+		.compatible	= "qcom,smb1357-charger",
+		.data		= &version_data[V_SMB1357],
+	},
+	{
+		.compatible	= "qcom,smb1358-charger",
+		.data		= &version_data[V_SMB1358],
+	},
+	{
+		.compatible	= "qcom,smb1359-charger",
+		.data		= &version_data[V_SMB1359],
+	},
+	{ },
+};
+
+#define DC_MA_MIN 300
+#define DC_MA_MAX 2000
+#define NUM_GAMMA_VALUES 4
+static int smb_parse_dt(struct smb135x_chg *chip)
+{
+	int rc;
+	struct device_node *node = chip->dev->of_node;
+	const char *dc_psy_type;
+
+	if (!node) {
+		dev_err(chip->dev, "device tree info missing\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+						&chip->vfloat_mv);
+	if (rc < 0)
+		chip->vfloat_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,charging-timeout",
+						&chip->safety_time);
+	if (rc < 0)
+		chip->safety_time = -EINVAL;
+
+	if (!rc &&
+		(chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) {
+		dev_err(chip->dev, "Bad charging-timeout %d\n",
+						chip->safety_time);
+		return -EINVAL;
+	}
+
+	chip->bmd_algo_disabled = of_property_read_bool(node,
+						"qcom,bmd-algo-disabled");
+
+	chip->dc_psy_type = -EINVAL;
+	dc_psy_type = of_get_property(node, "qcom,dc-psy-type", NULL);
+	if (dc_psy_type) {
+		if (strcmp(dc_psy_type, "Mains") == 0)
+			chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS;
+		else if (strcmp(dc_psy_type, "Wireless") == 0)
+			chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS;
+	}
+
+	if (chip->dc_psy_type != -EINVAL) {
+		rc = of_property_read_u32(node, "qcom,dc-psy-ma",
+							&chip->dc_psy_ma);
+		if (rc < 0) {
+			dev_err(chip->dev,
+					"no mA current for dc rc = %d\n", rc);
+			return rc;
+		}
+
+		if (chip->dc_psy_ma < DC_MA_MIN
+				|| chip->dc_psy_ma > DC_MA_MAX) {
+			dev_err(chip->dev, "Bad dc mA %d\n", chip->dc_psy_ma);
+			return -EINVAL;
+		}
+	}
+
+	rc = of_property_read_u32(node, "qcom,recharge-thresh-mv",
+						&chip->resume_delta_mv);
+	if (rc < 0)
+		chip->resume_delta_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,iterm-ma", &chip->iterm_ma);
+	if (rc < 0)
+		chip->iterm_ma = -EINVAL;
+
+	chip->iterm_disabled = of_property_read_bool(node,
+						"qcom,iterm-disabled");
+
+	chip->chg_disabled_permanently = (of_property_read_bool(node,
+						"qcom,charging-disabled"));
+	chip->chg_enabled = !chip->chg_disabled_permanently;
+
+	chip->inhibit_disabled = of_property_read_bool(node,
+						"qcom,inhibit-disabled");
+
+	chip->bms_controlled_charging = of_property_read_bool(node,
+					"qcom,bms-controlled-charging");
+
+	rc = of_property_read_string(node, "qcom,bms-psy-name",
+						&chip->bms_psy_name);
+	if (rc)
+		chip->bms_psy_name = NULL;
+
+	rc = of_property_read_u32(node, "qcom,fastchg-ma", &chip->fastchg_ma);
+	if (rc < 0)
+		chip->fastchg_ma = -EINVAL;
+
+	chip->soft_vfloat_comp_disabled = of_property_read_bool(node,
+					"qcom,soft-vfloat-comp-disabled");
+
+	chip->soft_current_comp_disabled = of_property_read_bool(node,
+					"qcom,soft-current-comp-disabled");
+
+	if (of_find_property(node, "therm-bias-supply", NULL)) {
+		/* get the thermistor bias regulator */
+		chip->therm_bias_vreg = devm_regulator_get(chip->dev,
+							"therm-bias");
+		if (IS_ERR(chip->therm_bias_vreg))
+			return PTR_ERR(chip->therm_bias_vreg);
+	}
+
+	/*
+	 * The gamma value indicates the ratio of the pull-up resistors and
+	 * the NTC resistor in the battery pack. There are 4 options; refer
+	 * to the graphical user interface and choose the right one.
+	 */
+	if (of_find_property(node, "qcom,gamma-setting",
+					&chip->gamma_setting_num)) {
+		chip->gamma_setting_num = chip->gamma_setting_num /
+					sizeof(chip->gamma_setting_num);
+		if (chip->gamma_setting_num != NUM_GAMMA_VALUES) {
+			pr_err("Gamma setting not correct!\n");
+			return -EINVAL;
+		}
+
+		chip->gamma_setting = devm_kzalloc(chip->dev,
+			chip->gamma_setting_num *
+				sizeof(chip->gamma_setting_num), GFP_KERNEL);
+		if (!chip->gamma_setting) {
+			pr_err("gamma setting kzalloc failed!\n");
+			return -ENOMEM;
+		}
+
+		rc = of_property_read_u32_array(node,
+					"qcom,gamma-setting",
+				chip->gamma_setting, chip->gamma_setting_num);
+		if (rc) {
+			pr_err("Couldn't read gamma setting, rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	if (of_find_property(node, "qcom,thermal-mitigation",
+					&chip->thermal_levels)) {
+		chip->thermal_mitigation = devm_kzalloc(chip->dev,
+			chip->thermal_levels,
+			GFP_KERNEL);
+
+		if (chip->thermal_mitigation == NULL) {
+			pr_err("thermal mitigation kzalloc() failed.\n");
+			return -ENOMEM;
+		}
+
+		chip->thermal_levels /= sizeof(int);
+		rc = of_property_read_u32_array(node,
+				"qcom,thermal-mitigation",
+				chip->thermal_mitigation, chip->thermal_levels);
+		if (rc) {
+			pr_err("Couldn't read therm limits rc = %d\n", rc);
+			return rc;
+		}
+	}
+
+	if (of_find_property(node, "usb-pullup-supply", NULL)) {
+		/* get the data line pull-up regulator */
+		chip->usb_pullup_vreg = devm_regulator_get(chip->dev,
+							"usb-pullup");
+		if (IS_ERR(chip->usb_pullup_vreg))
+			return PTR_ERR(chip->usb_pullup_vreg);
+	}
+
+	chip->pinctrl_state_name = of_get_property(node, "pinctrl-names", NULL);
+
+	chip->id_line_not_connected = of_property_read_bool(node,
+						"qcom,id-line-not-connected");
+	return 0;
+}
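
For the u32 array properties parsed above ("qcom,gamma-setting" and "qcom,thermal-mitigation"), the length that of_find_property() returns through its third argument is in bytes, which is why the code divides it by the element size before calling of_property_read_u32_array(). A minimal sketch of that pattern follows, shown only as an illustration; the dev/node variables are assumed context, not part of this driver:

	/* Sketch only: read a u32 array property of unknown length. */
	int len = 0;
	u32 *table;
	int rc;

	if (of_find_property(node, "qcom,thermal-mitigation", &len)) {
		/* len is in bytes, so it can be used directly for the alloc */
		table = devm_kzalloc(dev, len, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		/* element count = byte length / sizeof(u32) */
		rc = of_property_read_u32_array(node,
				"qcom,thermal-mitigation",
				table, len / sizeof(u32));
		if (rc)
			return rc;
	}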
+
+static int create_debugfs_entries(struct smb135x_chg *chip)
+{
+	chip->debug_root = debugfs_create_dir("smb135x", NULL);
+	if (!chip->debug_root)
+		dev_err(chip->dev, "Couldn't create debug dir\n");
+
+	if (chip->debug_root) {
+		struct dentry *ent;
+
+		ent = debugfs_create_file("config_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &cnfg_debugfs_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create cnfg debug file\n");
+
+		ent = debugfs_create_file("status_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &status_debugfs_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create status debug file\n");
+
+		ent = debugfs_create_file("cmd_registers", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &cmd_debugfs_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create cmd debug file\n");
+
+		ent = debugfs_create_x32("address", S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->peek_poke_address));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create address debug file\n");
+
+		ent = debugfs_create_file("data", S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &poke_poke_debug_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create data debug file\n");
+
+		ent = debugfs_create_file("force_irq",
+					  S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &force_irq_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create force_irq debug file\n");
+
+		ent = debugfs_create_x32("skip_writes",
+					  S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->skip_writes));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create skip writes debug file\n");
+
+		ent = debugfs_create_x32("skip_reads",
+					  S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->skip_reads));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create skip reads debug file\n");
+
+		ent = debugfs_create_file("irq_count", S_IFREG | 0444,
+					  chip->debug_root, chip,
+					  &irq_count_debugfs_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create irq_count debug file\n");
+
+		ent = debugfs_create_file("force_recharge",
+					  S_IFREG | 0644,
+					  chip->debug_root, chip,
+					  &force_rechg_ops);
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create force recharge debug file\n");
+
+		ent = debugfs_create_x32("usb_suspend_votes",
+					  S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->usb_suspended));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create usb_suspend_votes file\n");
+
+		ent = debugfs_create_x32("dc_suspend_votes",
+					  S_IFREG | 0644,
+					  chip->debug_root,
+					  &(chip->dc_suspended));
+		if (!ent)
+			dev_err(chip->dev,
+				"Couldn't create dc_suspend_votes file\n");
+	}
+	return 0;
+}
+
+static int is_parallel_charger(struct i2c_client *client)
+{
+	struct device_node *node = client->dev.of_node;
+
+	return of_property_read_bool(node, "qcom,parallel-charger");
+}
+
+static int smb135x_main_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc;
+	struct smb135x_chg *chip;
+	struct power_supply *usb_psy;
+	struct power_supply_config batt_psy_cfg = {};
+	struct power_supply_config dc_psy_cfg = {};
+	u8 reg = 0;
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+
+	rc = smb_parse_dt(chip);
+	if (rc < 0) {
+		dev_err(&client->dev, "Unable to parse DT nodes\n");
+		return rc;
+	}
+
+	usb_psy = power_supply_get_by_name("usb");
+	if (!usb_psy && chip->chg_enabled) {
+		dev_dbg(&client->dev, "USB supply not found; defer probe\n");
+		return -EPROBE_DEFER;
+	}
+	chip->usb_psy = usb_psy;
+
+	chip->fake_battery_soc = -EINVAL;
+
+	INIT_DELAYED_WORK(&chip->wireless_insertion_work,
+					wireless_insertion_work);
+
+	INIT_DELAYED_WORK(&chip->reset_otg_oc_count_work,
+					reset_otg_oc_count_work);
+	INIT_DELAYED_WORK(&chip->hvdcp_det_work, smb135x_hvdcp_det_work);
+	mutex_init(&chip->path_suspend_lock);
+	mutex_init(&chip->current_change_lock);
+	mutex_init(&chip->read_write_lock);
+	mutex_init(&chip->otg_oc_count_lock);
+	device_init_wakeup(chip->dev, true);
+	/* probe the device to check if it's actually connected */
+	rc = smb135x_read(chip, CFG_4_REG, &reg);
+	if (rc) {
+		pr_err("Failed to detect SMB135x, device may be absent\n");
+		return -ENODEV;
+	}
+
+	i2c_set_clientdata(client, chip);
+
+	rc = smb135x_chip_version_and_revision(chip);
+	if (rc) {
+		dev_err(&client->dev,
+			"Couldn't detect version/revision rc=%d\n", rc);
+		return rc;
+	}
+
+	dump_regs(chip);
+
+	rc = smb135x_regulator_init(chip);
+	if  (rc) {
+		dev_err(&client->dev,
+			"Couldn't initialize regulator rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb135x_hw_init(chip);
+	if (rc < 0) {
+		dev_err(&client->dev,
+			"Unable to initialize hardware rc = %d\n", rc);
+		goto free_regulator;
+	}
+
+	rc = determine_initial_status(chip);
+	if (rc < 0) {
+		dev_err(&client->dev,
+			"Unable to determine init status rc = %d\n", rc);
+		goto free_regulator;
+	}
+
+	chip->batt_psy_d.name = "battery";
+	chip->batt_psy_d.type = POWER_SUPPLY_TYPE_BATTERY;
+	chip->batt_psy_d.get_property = smb135x_battery_get_property;
+	chip->batt_psy_d.set_property = smb135x_battery_set_property;
+	chip->batt_psy_d.properties = smb135x_battery_properties;
+	chip->batt_psy_d.num_properties
+		= ARRAY_SIZE(smb135x_battery_properties);
+	chip->batt_psy_d.external_power_changed
+		= smb135x_external_power_changed;
+	chip->batt_psy_d.property_is_writeable = smb135x_battery_is_writeable;
+
+	batt_psy_cfg.drv_data = chip;
+	batt_psy_cfg.num_supplicants = 0;
+	if (chip->bms_controlled_charging) {
+		batt_psy_cfg.supplied_to = pm_batt_supplied_to;
+		batt_psy_cfg.num_supplicants
+					= ARRAY_SIZE(pm_batt_supplied_to);
+	}
+	chip->batt_psy = devm_power_supply_register(chip->dev,
+				&chip->batt_psy_d, &batt_psy_cfg);
+	if (IS_ERR(chip->batt_psy)) {
+		dev_err(&client->dev, "Unable to register batt_psy rc = %ld\n",
+				PTR_ERR(chip->batt_psy));
+		goto free_regulator;
+	}
+
+	if (chip->dc_psy_type != -EINVAL) {
+		chip->dc_psy_d.name = "dc";
+		chip->dc_psy_d.type = chip->dc_psy_type;
+		chip->dc_psy_d.get_property = smb135x_dc_get_property;
+		chip->dc_psy_d.properties = smb135x_dc_properties;
+		chip->dc_psy_d.num_properties
+			= ARRAY_SIZE(smb135x_dc_properties);
+
+		dc_psy_cfg.drv_data = chip;
+		dc_psy_cfg.num_supplicants = 0;
+		chip->dc_psy = devm_power_supply_register(chip->dev,
+				&chip->dc_psy_d,
+				&dc_psy_cfg);
+
+		if (IS_ERR(chip->dc_psy)) {
+			dev_err(&client->dev,
+				"Unable to register dc_psy rc = %ld\n",
+				PTR_ERR(chip->dc_psy));
+			goto free_regulator;
+		}
+	}
+
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
+	/* STAT irq configuration */
+	if (client->irq) {
+		rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+				smb135x_chg_stat_handler,
+				IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				"smb135x_chg_stat_irq", chip);
+		if (rc < 0) {
+			dev_err(&client->dev,
+				"request_irq for irq=%d failed rc = %d\n",
+				client->irq, rc);
+			goto free_regulator;
+		}
+		enable_irq_wake(client->irq);
+	}
+
+	create_debugfs_entries(chip);
+	dev_info(chip->dev, "SMB135X version = %s revision = %s successfully probed batt=%d dc = %d usb = %d\n",
+			version_str[chip->version],
+			revision_str[chip->revision],
+			smb135x_get_prop_batt_present(chip),
+			chip->dc_present, chip->usb_present);
+	return 0;
+
+free_regulator:
+	smb135x_regulator_deinit(chip);
+	return rc;
+}
+
+static int smb135x_parallel_charger_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	int rc;
+	struct smb135x_chg *chip;
+	const struct of_device_id *match;
+	struct device_node *node = client->dev.of_node;
+	struct power_supply_config parallel_psy_cfg = {};
+
+	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->client = client;
+	chip->dev = &client->dev;
+	chip->parallel_charger = true;
+	chip->dc_psy_type = -EINVAL;
+
+	chip->chg_enabled = !(of_property_read_bool(node,
+						"qcom,charging-disabled"));
+
+	rc = of_property_read_u32(node, "qcom,recharge-thresh-mv",
+						&chip->resume_delta_mv);
+	if (rc < 0)
+		chip->resume_delta_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+						&chip->vfloat_mv);
+	if (rc < 0)
+		chip->vfloat_mv = -EINVAL;
+
+	rc = of_property_read_u32(node, "qcom,parallel-en-pin-polarity",
+					&chip->parallel_pin_polarity_setting);
+	if (rc)
+		chip->parallel_pin_polarity_setting = CHG_EN_ACTIVE_LOW_BIT;
+	else
+		chip->parallel_pin_polarity_setting =
+				chip->parallel_pin_polarity_setting ?
+				CHG_EN_ACTIVE_HIGH_BIT : CHG_EN_ACTIVE_LOW_BIT;
+
+	mutex_init(&chip->path_suspend_lock);
+	mutex_init(&chip->current_change_lock);
+	mutex_init(&chip->read_write_lock);
+
+	match = of_match_node(smb135x_match_table, node);
+	if (match == NULL) {
+		dev_err(chip->dev, "device tree match not found\n");
+		return -EINVAL;
+	}
+
+	chip->version = *(int *)match->data;
+	smb135x_set_current_tables(chip);
+
+	i2c_set_clientdata(client, chip);
+
+	chip->parallel_psy_d.name = "parallel";
+	chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_PARALLEL;
+	chip->parallel_psy_d.get_property = smb135x_parallel_get_property;
+	chip->parallel_psy_d.set_property = smb135x_parallel_set_property;
+	chip->parallel_psy_d.properties	= smb135x_parallel_properties;
+	chip->parallel_psy_d.property_is_writeable
+				= smb135x_parallel_is_writeable;
+	chip->parallel_psy_d.num_properties
+				= ARRAY_SIZE(smb135x_parallel_properties);
+
+	parallel_psy_cfg.drv_data = chip;
+	parallel_psy_cfg.num_supplicants = 0;
+	chip->parallel_psy = devm_power_supply_register(chip->dev,
+			&chip->parallel_psy_d,
+			&parallel_psy_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		dev_err(&client->dev,
+			"Unable to register parallel_psy rc = %ld\n",
+			PTR_ERR(chip->parallel_psy));
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
+	create_debugfs_entries(chip);
+
+	dev_info(chip->dev, "SMB135X USB PARALLEL CHARGER version = %s successfully probed\n",
+			version_str[chip->version]);
+	return 0;
+}
+
+static int smb135x_chg_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	if (is_parallel_charger(client))
+		return smb135x_parallel_charger_probe(client, id);
+	else
+		return smb135x_main_charger_probe(client, id);
+}
+
+static int smb135x_chg_remove(struct i2c_client *client)
+{
+	int rc;
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+	debugfs_remove_recursive(chip->debug_root);
+
+	if (chip->parallel_charger)
+		goto mutex_destroy;
+
+	if (chip->therm_bias_vreg) {
+		rc = regulator_disable(chip->therm_bias_vreg);
+		if (rc)
+			pr_err("Couldn't disable therm-bias rc = %d\n", rc);
+	}
+
+	if (chip->usb_pullup_vreg) {
+		rc = regulator_disable(chip->usb_pullup_vreg);
+		if (rc)
+			pr_err("Couldn't disable data-pullup rc = %d\n", rc);
+	}
+
+	smb135x_regulator_deinit(chip);
+
+mutex_destroy:
+	mutex_destroy(&chip->irq_complete);
+	return 0;
+}
+
+static int smb135x_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+	int i, rc;
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	/* Save the current IRQ config */
+	for (i = 0; i < 3; i++) {
+		rc = smb135x_read(chip, IRQ_CFG_REG + i,
+					&chip->irq_cfg_mask[i]);
+		if (rc)
+			dev_err(chip->dev,
+				"Couldn't save irq cfg regs rc=%d\n", rc);
+	}
+
+	/* enable only important IRQs */
+	rc = smb135x_write(chip, IRQ_CFG_REG, IRQ_USBIN_UV_BIT);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set irq_cfg rc = %d\n", rc);
+
+	rc = smb135x_write(chip, IRQ2_CFG_REG, IRQ2_BATT_MISSING_BIT
+						| IRQ2_VBAT_LOW_BIT
+						| IRQ2_POWER_OK_BIT);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set irq2_cfg rc = %d\n", rc);
+
+	rc = smb135x_write(chip, IRQ3_CFG_REG, IRQ3_SRC_DETECT_BIT
+			| IRQ3_DCIN_UV_BIT | IRQ3_RID_DETECT_BIT);
+	if (rc < 0)
+		dev_err(chip->dev, "Couldn't set irq3_cfg rc = %d\n", rc);
+
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = false;
+	mutex_unlock(&chip->irq_complete);
+
+	return 0;
+}
+
+static int smb135x_suspend_noirq(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+
+	if (chip->irq_waiting) {
+		pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int smb135x_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+	int i, rc;
+
+	/* no suspend resume activities for parallel charger */
+	if (chip->parallel_charger)
+		return 0;
+	/* Restore the IRQ config */
+	for (i = 0; i < 3; i++) {
+		rc = smb135x_write(chip, IRQ_CFG_REG + i,
+					chip->irq_cfg_mask[i]);
+		if (rc)
+			dev_err(chip->dev,
+				"Couldn't restore irq cfg regs rc=%d\n", rc);
+	}
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = true;
+	if (chip->irq_waiting) {
+		mutex_unlock(&chip->irq_complete);
+		smb135x_chg_stat_handler(client->irq, chip);
+		enable_irq(client->irq);
+	} else {
+		mutex_unlock(&chip->irq_complete);
+	}
+	return 0;
+}
+
+static const struct dev_pm_ops smb135x_pm_ops = {
+	.resume		= smb135x_resume,
+	.suspend_noirq	= smb135x_suspend_noirq,
+	.suspend	= smb135x_suspend,
+};
+
+static const struct i2c_device_id smb135x_chg_id[] = {
+	{"smb135x-charger", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, smb135x_chg_id);
+
+static void smb135x_shutdown(struct i2c_client *client)
+{
+	int rc;
+	struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+	if (chip->usb_pullup_vreg) {
+		/*
+		 * switch to the 5V adapter to prevent any erroneous request
+		 * of 12V when the USB D+ line pull-up regulator turns off.
+		 */
+		rc = smb135x_masked_write(chip, CFG_E_REG, HVDCP_5_9_BIT, 0);
+		if (rc < 0)
+			dev_err(chip->dev,
+				"Couldn't request for 5V rc=%d\n", rc);
+	}
+}
+
+static struct i2c_driver smb135x_chg_driver = {
+	.driver		= {
+		.name		= "smb135x-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= smb135x_match_table,
+		.pm		= &smb135x_pm_ops,
+	},
+	.probe		= smb135x_chg_probe,
+	.remove		= smb135x_chg_remove,
+	.id_table	= smb135x_chg_id,
+	.shutdown	= smb135x_shutdown,
+};
+
+module_i2c_driver(smb135x_chg_driver);
+
+MODULE_DESCRIPTION("SMB135x Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:smb135x-charger");
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
new file mode 100644
index 0000000..1e89a721
--- /dev/null
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -0,0 +1,1573 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "SMB138X: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/iio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "smb-reg.h"
+#include "smb-lib.h"
+#include "storm-watch.h"
+#include "pmic-voter.h"
+
+#define SMB138X_DEFAULT_FCC_UA 1000000
+#define SMB138X_DEFAULT_ICL_UA 1500000
+
+/* Registers that are not common across chips and hence not in smb-reg.h */
+#define SMB2CHG_MISC_ENG_SDCDC_CFG2	(MISC_BASE + 0xC1)
+#define ENG_SDCDC_SEL_OOB_VTH_BIT	BIT(0)
+
+#define SMB2CHG_MISC_ENG_SDCDC_CFG6	(MISC_BASE + 0xC5)
+#define DEAD_TIME_MASK			GENMASK(7, 4)
+#define HIGH_DEAD_TIME_MASK		GENMASK(7, 4)
+
+#define SMB2CHG_DC_TM_SREFGEN		(DCIN_BASE + 0xE2)
+#define STACKED_DIODE_EN_BIT		BIT(2)
+
+#define TDIE_AVG_COUNT	10
+
+enum {
+	OOB_COMP_WA_BIT = BIT(0),
+};
+
+static struct smb_params v1_params = {
+	.fcc		= {
+		.name	= "fast charge current",
+		.reg	= FAST_CHARGE_CURRENT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.fv		= {
+		.name	= "float voltage",
+		.reg	= FLOAT_VOLTAGE_CFG_REG,
+		.min_u	= 2450000,
+		.max_u	= 4950000,
+		.step_u	= 10000,
+	},
+	.usb_icl	= {
+		.name	= "usb input current limit",
+		.reg	= USBIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.dc_icl		= {
+		.name	= "dc input current limit",
+		.reg	= DCIN_CURRENT_LIMIT_CFG_REG,
+		.min_u	= 0,
+		.max_u	= 6000000,
+		.step_u	= 25000,
+	},
+	.freq_buck	= {
+		.name	= "buck switching frequency",
+		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
+		.min_u	= 500,
+		.max_u	= 2000,
+		.step_u	= 100,
+	},
+};
+
+struct smb_dt_props {
+	bool	suspend_input;
+	int	fcc_ua;
+	int	usb_icl_ua;
+	int	dc_icl_ua;
+	int	chg_temp_max_mdegc;
+	int	connector_temp_max_mdegc;
+};
+
+struct smb138x {
+	struct smb_charger	chg;
+	struct smb_dt_props	dt;
+	struct power_supply	*parallel_psy;
+	u32			wa_flags;
+};
+
+static int __debug_mask;
+module_param_named(
+	debug_mask, __debug_mask, int, 0600
+);
+
+irqreturn_t smb138x_handle_slave_chg_state_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb138x *chip = irq_data->parent_data;
+
+	if (chip->parallel_psy)
+		power_supply_changed(chip->parallel_psy);
+
+	return IRQ_HANDLED;
+}
+
+static int smb138x_get_prop_charger_temp(struct smb138x *chip,
+				 union power_supply_propval *val)
+{
+	union power_supply_propval pval;
+	int rc = 0, avg = 0, i;
+	struct smb_charger *chg = &chip->chg;
+
+	for (i = 0; i < TDIE_AVG_COUNT; i++) {
+		pval.intval = 0;
+		rc = smblib_get_prop_charger_temp(chg, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't read chg temp at %dth iteration rc = %d\n",
+					i + 1, rc);
+			return rc;
+		}
+		avg += pval.intval;
+	}
+	val->intval = avg / TDIE_AVG_COUNT;
+	return rc;
+}
+
+static int smb138x_parse_dt(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	int rc;
+
+	if (!node) {
+		pr_err("device tree node missing\n");
+		return -EINVAL;
+	}
+
+	chip->dt.suspend_input = of_property_read_bool(node,
+				"qcom,suspend-input");
+
+	rc = of_property_read_u32(node,
+				"qcom,fcc-max-ua", &chip->dt.fcc_ua);
+	if (rc < 0)
+		chip->dt.fcc_ua = SMB138X_DEFAULT_FCC_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,usb-icl-ua", &chip->dt.usb_icl_ua);
+	if (rc < 0)
+		chip->dt.usb_icl_ua = SMB138X_DEFAULT_ICL_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
+	if (rc < 0)
+		chip->dt.dc_icl_ua = SMB138X_DEFAULT_ICL_UA;
+
+	rc = of_property_read_u32(node,
+				"qcom,charger-temp-max-mdegc",
+				&chip->dt.chg_temp_max_mdegc);
+	if (rc < 0)
+		chip->dt.chg_temp_max_mdegc = 80000;
+
+	rc = of_property_read_u32(node,
+				"qcom,connector-temp-max-mdegc",
+				&chip->dt.connector_temp_max_mdegc);
+	if (rc < 0)
+		chip->dt.connector_temp_max_mdegc = 105000;
+
+	return 0;
+}
+
+/************************
+ * USB PSY REGISTRATION *
+ ************************/
+
+static enum power_supply_property smb138x_usb_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_VOLTAGE_MIN,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_TYPEC_MODE,
+	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
+	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION,
+};
+
+static int smb138x_usb_get_prop(struct power_supply *psy,
+				enum power_supply_property prop,
+				union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_usb_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		rc = smblib_get_prop_usb_online(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		val->intval = chg->voltage_min_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		val->intval = chg->voltage_max_uv;
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+		rc = smblib_get_prop_usb_voltage_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_get_prop_usb_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPE:
+		val->intval = chg->usb_psy_desc.type;
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_MODE:
+		rc = smblib_get_prop_typec_mode(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_get_prop_typec_power_role(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION:
+		rc = smblib_get_prop_typec_cc_orientation(chg, val);
+		break;
+	default:
+		pr_err("get prop %d is not supported\n", prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb138x_usb_set_prop(struct power_supply *psy,
+				enum power_supply_property prop,
+				const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+		rc = smblib_set_prop_usb_voltage_min(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_prop_usb_voltage_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_prop_usb_current_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		rc = smblib_set_prop_typec_power_role(chg, val);
+		break;
+	default:
+		pr_err("set prop %d is not supported\n", prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_usb_prop_is_writeable(struct power_supply *psy,
+					 enum power_supply_property prop)
+{
+	switch (prop) {
+	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int smb138x_init_usb_psy(struct smb138x *chip)
+{
+	struct power_supply_config usb_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	chg->usb_psy_desc.name = "usb";
+	chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
+	chg->usb_psy_desc.properties = smb138x_usb_props;
+	chg->usb_psy_desc.num_properties = ARRAY_SIZE(smb138x_usb_props);
+	chg->usb_psy_desc.get_property = smb138x_usb_get_prop;
+	chg->usb_psy_desc.set_property = smb138x_usb_set_prop;
+	chg->usb_psy_desc.property_is_writeable = smb138x_usb_prop_is_writeable;
+
+	usb_cfg.drv_data = chip;
+	usb_cfg.of_node = chg->dev->of_node;
+	chg->usb_psy = devm_power_supply_register(chg->dev,
+						  &chg->usb_psy_desc,
+						  &usb_cfg);
+	if (IS_ERR(chg->usb_psy)) {
+		pr_err("Couldn't register USB power supply\n");
+		return PTR_ERR(chg->usb_psy);
+	}
+
+	return 0;
+}
+
+/*************************
+ * BATT PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb138x_batt_props[] = {
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+};
+
+static int smb138x_batt_get_prop(struct power_supply *psy,
+				 enum power_supply_property prop,
+				 union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_STATUS:
+		rc = smblib_get_prop_batt_status(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_HEALTH:
+		rc = smblib_get_prop_batt_health(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		rc = smblib_get_prop_batt_present(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_get_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smb138x_get_prop_charger_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	default:
+		pr_err("batt power supply get prop %d not supported\n", prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb138x_batt_set_prop(struct power_supply *psy,
+				 enum power_supply_property prop,
+				 const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_set_prop_input_suspend(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		rc = smblib_set_prop_batt_capacity(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	default:
+		pr_err("batt power supply set prop %d not supported\n", prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_batt_prop_is_writeable(struct power_supply *psy,
+					  enum power_supply_property prop)
+{
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+	case POWER_SUPPLY_PROP_CAPACITY:
+		return 1;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static const struct power_supply_desc batt_psy_desc = {
+	.name			= "battery",
+	.type			= POWER_SUPPLY_TYPE_BATTERY,
+	.properties		= smb138x_batt_props,
+	.num_properties		= ARRAY_SIZE(smb138x_batt_props),
+	.get_property		= smb138x_batt_get_prop,
+	.set_property		= smb138x_batt_set_prop,
+	.property_is_writeable	= smb138x_batt_prop_is_writeable,
+};
+
+static int smb138x_init_batt_psy(struct smb138x *chip)
+{
+	struct power_supply_config batt_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	batt_cfg.drv_data = chip;
+	batt_cfg.of_node = chg->dev->of_node;
+	chg->batt_psy = devm_power_supply_register(chg->dev,
+						   &batt_psy_desc,
+						   &batt_cfg);
+	if (IS_ERR(chg->batt_psy)) {
+		pr_err("Couldn't register battery power supply\n");
+		return PTR_ERR(chg->batt_psy);
+	}
+
+	return rc;
+}
+
+/*****************************
+ * PARALLEL PSY REGISTRATION *
+ *****************************/
+
+static int smb138x_get_prop_connector_health(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc, lb_mdegc, ub_mdegc, rst_mdegc, connector_mdegc;
+
+	if (!chg->iio.connector_temp_chan ||
+		PTR_ERR(chg->iio.connector_temp_chan) == -EPROBE_DEFER)
+		chg->iio.connector_temp_chan = iio_channel_get(chg->dev,
+							"connector_temp");
+
+	if (IS_ERR(chg->iio.connector_temp_chan))
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr1_chan,
+							&lb_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector lower bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr2_chan,
+							&ub_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector upper bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_thr3_chan,
+							&rst_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector reset bound rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	rc = iio_read_channel_processed(chg->iio.connector_temp_chan,
+							&connector_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't read connector temperature rc=%d\n", rc);
+		return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
+	if (connector_mdegc < lb_mdegc)
+		return POWER_SUPPLY_HEALTH_COOL;
+	else if (connector_mdegc < ub_mdegc)
+		return POWER_SUPPLY_HEALTH_WARM;
+	else if (connector_mdegc < rst_mdegc)
+		return POWER_SUPPLY_HEALTH_HOT;
+
+	return POWER_SUPPLY_HEALTH_OVERHEAT;
+}
+
+static enum power_supply_property smb138x_parallel_props[] = {
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_MODEL_NAME,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+	POWER_SUPPLY_PROP_SET_SHIP_MODE,
+};
+
+static int smb138x_parallel_get_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+	u8 temp;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CHARGE_TYPE:
+		rc = smblib_get_prop_batt_charge_type(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+		rc = smblib_read(chg, BATTERY_CHARGER_STATUS_5_REG,
+				 &temp);
+		if (rc >= 0)
+			val->intval = (bool)(temp & CHARGING_ENABLE_BIT);
+		break;
+	case POWER_SUPPLY_PROP_PIN_ENABLED:
+		rc = smblib_read(chg, BATTERY_CHARGER_STATUS_5_REG,
+				 &temp);
+		if (rc >= 0)
+			val->intval = !(temp & DISABLE_CHARGING_BIT);
+		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smblib_get_usb_suspend(chg, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_get_charge_param(chg, &chg->param.fcc,
+					     &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_NOW:
+		rc = smblib_get_prop_slave_current_now(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP:
+		rc = smb138x_get_prop_charger_temp(chip, val);
+		break;
+	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
+		rc = smblib_get_prop_charger_temp_max(chg, val);
+		break;
+	case POWER_SUPPLY_PROP_MODEL_NAME:
+		val->strval = "smb138x";
+		break;
+	case POWER_SUPPLY_PROP_PARALLEL_MODE:
+		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
+		break;
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		val->intval = smb138x_get_prop_connector_health(chip);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as device is active */
+		val->intval = 0;
+		break;
+	default:
+		pr_err("parallel power supply get prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	if (rc < 0) {
+		pr_debug("Couldn't get prop %d rc = %d\n", prop, rc);
+		return -ENODATA;
+	}
+
+	return rc;
+}
+
+static int smb138x_set_parallel_suspend(struct smb138x *chip, bool suspend)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT,
+				 suspend ? 0 : WDOG_TIMER_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't %s watchdog rc=%d\n",
+		       suspend ? "disable" : "enable", rc);
+		suspend = true;
+	}
+
+	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
+				 suspend ? USBIN_SUSPEND_BIT : 0);
+	if (rc < 0) {
+		pr_err("Couldn't %s parallel charger rc=%d\n",
+		       suspend ? "suspend" : "resume", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb138x_parallel_set_prop(struct power_supply *psy,
+				     enum power_supply_property prop,
+				     const union power_supply_propval *val)
+{
+	struct smb138x *chip = power_supply_get_drvdata(psy);
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	switch (prop) {
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
+		break;
+	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+		break;
+	case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+		/* Not in ship mode as long as the device is active */
+		if (!val->intval)
+			break;
+		rc = smblib_set_prop_ship_mode(chg, val);
+		break;
+	default:
+		pr_debug("parallel power supply set prop %d not supported\n",
+			prop);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int smb138x_parallel_prop_is_writeable(struct power_supply *psy,
+					      enum power_supply_property prop)
+{
+	return 0;
+}
+
+static const struct power_supply_desc parallel_psy_desc = {
+	.name			= "parallel",
+	.type			= POWER_SUPPLY_TYPE_PARALLEL,
+	.properties		= smb138x_parallel_props,
+	.num_properties		= ARRAY_SIZE(smb138x_parallel_props),
+	.get_property		= smb138x_parallel_get_prop,
+	.set_property		= smb138x_parallel_set_prop,
+	.property_is_writeable	= smb138x_parallel_prop_is_writeable,
+};
+
+static int smb138x_init_parallel_psy(struct smb138x *chip)
+{
+	struct power_supply_config parallel_cfg = {};
+	struct smb_charger *chg = &chip->chg;
+
+	parallel_cfg.drv_data = chip;
+	parallel_cfg.of_node = chg->dev->of_node;
+	chip->parallel_psy = devm_power_supply_register(chg->dev,
+						   &parallel_psy_desc,
+						   &parallel_cfg);
+	if (IS_ERR(chip->parallel_psy)) {
+		pr_err("Couldn't register parallel power supply\n");
+		return PTR_ERR(chip->parallel_psy);
+	}
+
+	return 0;
+}
+
+/******************************
+ * VBUS REGULATOR REGISTRATION *
+ ******************************/
+
+struct regulator_ops smb138x_vbus_reg_ops = {
+	.enable		= smblib_vbus_regulator_enable,
+	.disable	= smblib_vbus_regulator_disable,
+	.is_enabled	= smblib_vbus_regulator_is_enabled,
+};
+
+static int smb138x_init_vbus_regulator(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vbus_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vbus_vreg),
+				      GFP_KERNEL);
+	if (!chg->vbus_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vbus_vreg->rdesc.owner = THIS_MODULE;
+	chg->vbus_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vbus_vreg->rdesc.ops = &smb138x_vbus_reg_ops;
+	chg->vbus_vreg->rdesc.of_match = "qcom,smb138x-vbus";
+	chg->vbus_vreg->rdesc.name = "qcom,smb138x-vbus";
+
+	chg->vbus_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vbus_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vbus_vreg->rdev)) {
+		rc = PTR_ERR(chg->vbus_vreg->rdev);
+		chg->vbus_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VBUS regulator rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/******************************
+ * VCONN REGULATOR REGISTRATION *
+ ******************************/
+
+struct regulator_ops smb138x_vconn_reg_ops = {
+	.enable		= smblib_vconn_regulator_enable,
+	.disable	= smblib_vconn_regulator_disable,
+	.is_enabled	= smblib_vconn_regulator_is_enabled,
+};
+
+static int smb138x_init_vconn_regulator(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct regulator_config cfg = {};
+	int rc = 0;
+
+	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
+				      GFP_KERNEL);
+	if (!chg->vconn_vreg)
+		return -ENOMEM;
+
+	cfg.dev = chg->dev;
+	cfg.driver_data = chip;
+
+	chg->vconn_vreg->rdesc.owner = THIS_MODULE;
+	chg->vconn_vreg->rdesc.type = REGULATOR_VOLTAGE;
+	chg->vconn_vreg->rdesc.ops = &smb138x_vconn_reg_ops;
+	chg->vconn_vreg->rdesc.of_match = "qcom,smb138x-vconn";
+	chg->vconn_vreg->rdesc.name = "qcom,smb138x-vconn";
+
+	chg->vconn_vreg->rdev = devm_regulator_register(chg->dev,
+						&chg->vconn_vreg->rdesc, &cfg);
+	if (IS_ERR(chg->vconn_vreg->rdev)) {
+		rc = PTR_ERR(chg->vconn_vreg->rdev);
+		chg->vconn_vreg->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't register VCONN regulator rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/***************************
+ * HARDWARE INITIALIZATION *
+ ***************************/
+
+#define MDEGC_3		3000
+#define MDEGC_15	15000
+static int smb138x_init_slave_hw(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc;
+
+	if (chip->wa_flags & OOB_COMP_WA_BIT) {
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG2,
+					ENG_SDCDC_SEL_OOB_VTH_BIT,
+					ENG_SDCDC_SEL_OOB_VTH_BIT);
+		if (rc < 0) {
+			pr_err("Couldn't configure the OOB comp threshold rc = %d\n",
+									rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG6,
+				DEAD_TIME_MASK, HIGH_DEAD_TIME_MASK);
+		if (rc < 0) {
+			pr_err("Couldn't configure the sdcdc cfg 6 reg rc = %d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	/* enable watchdog bark and bite interrupts, and disable the watchdog */
+	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
+			| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
+			| BARK_WDOG_INT_EN_BIT,
+			BITE_WDOG_INT_EN_BIT | BARK_WDOG_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog rc=%d\n", rc);
+		return rc;
+	}
+
+	/* disable charging when watchdog bites */
+	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT,
+				 BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure the watchdog bite rc=%d\n", rc);
+		return rc;
+	}
+
+	/* suspend parallel charging */
+	rc = smb138x_set_parallel_suspend(chip, true);
+	if (rc < 0) {
+		pr_err("Couldn't suspend parallel charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/* initialize FCC to 0 */
+	rc = smblib_set_charge_param(chg, &chg->param.fcc, 0);
+	if (rc < 0) {
+		pr_err("Couldn't set 0 FCC rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = smblib_masked_write(chg, CHARGING_ENABLE_CMD_REG,
+				 CHARGING_ENABLE_CMD_BIT,
+				 CHARGING_ENABLE_CMD_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* enable parallel current sensing */
+	rc = smblib_masked_write(chg, CFG_REG,
+				 VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable parallel current sensing rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* enable stacked diode */
+	rc = smblib_write(chg, SMB2CHG_DC_TM_SREFGEN, STACKED_DIODE_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable stacked diode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* initialize charger temperature threshold */
+	rc = iio_write_channel_processed(chg->iio.temp_max_chan,
+					chip->dt.chg_temp_max_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't set charger temp threshold rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr1_chan,
+				chip->dt.connector_temp_max_mdegc);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold1 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr2_chan,
+				chip->dt.connector_temp_max_mdegc + MDEGC_3);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold2 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = iio_write_channel_processed(chg->iio.connector_temp_thr3_chan,
+				chip->dt.connector_temp_max_mdegc + MDEGC_15);
+	if (rc < 0) {
+		pr_err("Couldn't set connector temp threshold3 rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smblib_write(chg, THERMREG_SRC_CFG_REG,
+						THERMREG_SKIN_ADC_SRC_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't enable connector thermreg source rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int smb138x_init_hw(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	/* votes must be cast before configuring software control */
+	vote(chg->dc_suspend_votable,
+		DEFAULT_VOTER, chip->dt.suspend_input, 0);
+	vote(chg->fcc_votable,
+		DEFAULT_VOTER, true, chip->dt.fcc_ua);
+	vote(chg->usb_icl_votable,
+		DCP_VOTER, true, chip->dt.usb_icl_ua);
+	vote(chg->dc_icl_votable,
+		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
+
+	chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+
+	/* configure a fixed 700 kHz switching frequency to avoid tdie errors */
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+	if (rc < 0) {
+		pr_err("Couldn't configure 700 kHz switch freq rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure charge enable for software control; active high */
+	rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+				 CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure charge enable source rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable the charging path */
+	rc = vote(chg->chg_disable_votable, DEFAULT_VOTER, false, 0);
+	if (rc < 0) {
+		pr_err("Couldn't enable charging rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * trigger the usb-typec-change interrupt only when the CC state
+	 * changes, or there was a VBUS error
+	 */
+	rc = smblib_write(chg, TYPE_C_INTRPT_ENB_REG,
+			    TYPEC_CCSTATE_CHANGE_INT_EN_BIT
+			  | TYPEC_VBUS_ERROR_INT_EN_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure Type-C interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VCONN for software control */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_SRC_BIT | VCONN_EN_VALUE_BIT,
+				 VCONN_EN_SRC_BIT);
+	if (rc < 0) {
+		pr_err("Couldn't configure VCONN for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure VBUS for software control */
+	rc = smblib_masked_write(chg, OTG_CFG_REG, OTG_EN_SRC_CFG_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure VBUS for SW control rc=%d\n", rc);
+		return rc;
+	}
+
+	/* configure power role for dual-role */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 TYPEC_POWER_ROLE_CMD_MASK, 0);
+	if (rc < 0) {
+		pr_err("Couldn't configure power role for DRP rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->wa_flags & OOB_COMP_WA_BIT) {
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG2,
+					ENG_SDCDC_SEL_OOB_VTH_BIT,
+					ENG_SDCDC_SEL_OOB_VTH_BIT);
+		if (rc < 0) {
+			pr_err("Couldn't configure the OOB comp threshold rc = %d\n",
+									rc);
+			return rc;
+		}
+
+		rc = smblib_masked_write(chg, SMB2CHG_MISC_ENG_SDCDC_CFG6,
+				DEAD_TIME_MASK, HIGH_DEAD_TIME_MASK);
+		if (rc < 0) {
+			pr_err("Couldn't configure the sdcdc cfg 6 reg rc = %d\n",
+									rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int smb138x_setup_wa_flags(struct smb138x *chip)
+{
+	struct pmic_revid_data *pmic_rev_id;
+	struct device_node *revid_dev_node;
+
+	revid_dev_node = of_parse_phandle(chip->chg.dev->of_node,
+					"qcom,pmic-revid", 0);
+	if (!revid_dev_node) {
+		pr_err("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	pmic_rev_id = get_revid_data(revid_dev_node);
+	if (IS_ERR_OR_NULL(pmic_rev_id)) {
+		/*
+		 * The revid peripheral must be registered; any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	switch (pmic_rev_id->pmic_subtype) {
+	case SMB1381_SUBTYPE:
+		if (pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */
+			chip->wa_flags |= OOB_COMP_WA_BIT;
+		break;
+	default:
+		pr_err("PMIC subtype %d not supported\n",
+				pmic_rev_id->pmic_subtype);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/****************************
+ * DETERMINE INITIAL STATUS *
+ ****************************/
+
+static irqreturn_t smb138x_handle_temperature_change(int irq, void *data)
+{
+	struct smb_irq_data *irq_data = data;
+	struct smb138x *chip = irq_data->parent_data;
+
+	power_supply_changed(chip->parallel_psy);
+	return IRQ_HANDLED;
+}
+
+static int smb138x_determine_initial_slave_status(struct smb138x *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+
+	smb138x_handle_temperature_change(0, &irq_data);
+	return 0;
+}
+
+static int smb138x_determine_initial_status(struct smb138x *chip)
+{
+	struct smb_irq_data irq_data = {chip, "determine-initial-status"};
+
+	smblib_handle_usb_plugin(0, &irq_data);
+	smblib_handle_usb_typec_change(0, &irq_data);
+	smblib_handle_usb_source_change(0, &irq_data);
+	return 0;
+}
+
+/**************************
+ * INTERRUPT REGISTRATION *
+ **************************/
+
+static struct smb_irq_info smb138x_irqs[] = {
+/* CHARGER IRQs */
+	[CHG_ERROR_IRQ] = {
+		.name		= "chg-error",
+		.handler	= smblib_handle_debug,
+	},
+	[CHG_STATE_CHANGE_IRQ] = {
+		.name		= "chg-state-change",
+		.handler	= smb138x_handle_slave_chg_state_change,
+		.wake		= true,
+	},
+	[STEP_CHG_STATE_CHANGE_IRQ] = {
+		.name		= "step-chg-state-change",
+		.handler	= smblib_handle_debug,
+	},
+	[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
+		.name		= "step-chg-soc-update-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
+		.name		= "step-chg-soc-update-request",
+		.handler	= smblib_handle_debug,
+	},
+/* OTG IRQs */
+	[OTG_FAIL_IRQ] = {
+		.name		= "otg-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OVERCURRENT_IRQ] = {
+		.name		= "otg-overcurrent",
+		.handler	= smblib_handle_debug,
+	},
+	[OTG_OC_DIS_SW_STS_IRQ] = {
+		.name		= "otg-oc-dis-sw-sts",
+		.handler	= smblib_handle_debug,
+	},
+	[TESTMODE_CHANGE_DET_IRQ] = {
+		.name		= "testmode-change-detect",
+		.handler	= smblib_handle_debug,
+	},
+/* BATTERY IRQs */
+	[BATT_TEMP_IRQ] = {
+		.name		= "bat-temp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OCP_IRQ] = {
+		.name		= "bat-ocp",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_OV_IRQ] = {
+		.name		= "bat-ov",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_LOW_IRQ] = {
+		.name		= "bat-low",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_THERM_ID_MISS_IRQ] = {
+		.name		= "bat-therm-or-id-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+	[BATT_TERM_MISS_IRQ] = {
+		.name		= "bat-terminal-missing",
+		.handler	= smblib_handle_batt_psy_changed,
+	},
+/* USB INPUT IRQs */
+	[USBIN_COLLAPSE_IRQ] = {
+		.name		= "usbin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_LT_3P6V_IRQ] = {
+		.name		= "usbin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_UV_IRQ] = {
+		.name		= "usbin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_OV_IRQ] = {
+		.name		= "usbin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[USBIN_PLUGIN_IRQ] = {
+		.name		= "usbin-plugin",
+		.handler	= smblib_handle_usb_plugin,
+	},
+	[USBIN_SRC_CHANGE_IRQ] = {
+		.name		= "usbin-src-change",
+		.handler	= smblib_handle_usb_source_change,
+	},
+	[USBIN_ICL_CHANGE_IRQ] = {
+		.name		= "usbin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+	[TYPE_C_CHANGE_IRQ] = {
+		.name		= "type-c-change",
+		.handler	= smblib_handle_usb_typec_change,
+	},
+/* DC INPUT IRQs */
+	[DCIN_COLLAPSE_IRQ] = {
+		.name		= "dcin-collapse",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_LT_3P6V_IRQ] = {
+		.name		= "dcin-lt-3p6v",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_UV_IRQ] = {
+		.name		= "dcin-uv",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_OV_IRQ] = {
+		.name		= "dcin-ov",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_PLUGIN_IRQ] = {
+		.name		= "dcin-plugin",
+		.handler	= smblib_handle_debug,
+	},
+	[DIV2_EN_DG_IRQ] = {
+		.name		= "div2-en-dg",
+		.handler	= smblib_handle_debug,
+	},
+	[DCIN_ICL_CHANGE_IRQ] = {
+		.name		= "dcin-icl-change",
+		.handler	= smblib_handle_debug,
+	},
+/* MISCELLANEOUS IRQs */
+	[WDOG_SNARL_IRQ] = {
+		.name		= "wdog-snarl",
+		.handler	= smblib_handle_debug,
+	},
+	[WDOG_BARK_IRQ] = {
+		.name		= "wdog-bark",
+		.handler	= smblib_handle_wdog_bark,
+		.wake		= true,
+	},
+	[AICL_FAIL_IRQ] = {
+		.name		= "aicl-fail",
+		.handler	= smblib_handle_debug,
+	},
+	[AICL_DONE_IRQ] = {
+		.name		= "aicl-done",
+		.handler	= smblib_handle_debug,
+	},
+	[HIGH_DUTY_CYCLE_IRQ] = {
+		.name		= "high-duty-cycle",
+		.handler	= smblib_handle_debug,
+	},
+	[INPUT_CURRENT_LIMIT_IRQ] = {
+		.name		= "input-current-limiting",
+		.handler	= smblib_handle_debug,
+	},
+	[TEMPERATURE_CHANGE_IRQ] = {
+		.name		= "temperature-change",
+		.handler	= smb138x_handle_temperature_change,
+	},
+	[SWITCH_POWER_OK_IRQ] = {
+		.name		= "switcher-power-ok",
+		.handler	= smblib_handle_debug,
+	},
+};
+
+static int smb138x_get_irq_index_byname(const char *irq_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(smb138x_irqs); i++) {
+		if (strcmp(smb138x_irqs[i].name, irq_name) == 0)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+static int smb138x_request_interrupt(struct smb138x *chip,
+				     struct device_node *node,
+				     const char *irq_name)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0, irq, irq_index;
+	struct smb_irq_data *irq_data;
+
+	irq = of_irq_get_byname(node, irq_name);
+	if (irq < 0) {
+		pr_err("Couldn't get irq %s byname\n", irq_name);
+		return irq;
+	}
+
+	irq_index = smb138x_get_irq_index_byname(irq_name);
+	if (irq_index < 0) {
+		pr_err("%s is not a defined irq\n", irq_name);
+		return irq_index;
+	}
+
+	if (!smb138x_irqs[irq_index].handler)
+		return 0;
+
+	irq_data = devm_kzalloc(chg->dev, sizeof(*irq_data), GFP_KERNEL);
+	if (!irq_data)
+		return -ENOMEM;
+
+	irq_data->parent_data = chip;
+	irq_data->name = irq_name;
+	irq_data->storm_data = smb138x_irqs[irq_index].storm_data;
+	mutex_init(&irq_data->storm_data.storm_lock);
+
+	rc = devm_request_threaded_irq(chg->dev, irq, NULL,
+					smb138x_irqs[irq_index].handler,
+					IRQF_ONESHOT, irq_name, irq_data);
+	if (rc < 0) {
+		pr_err("Couldn't request irq %d\n", irq);
+		return rc;
+	}
+
+	if (smb138x_irqs[irq_index].wake)
+		enable_irq_wake(irq);
+
+	return rc;
+}
+
+static int smb138x_request_interrupts(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	struct device_node *node = chg->dev->of_node;
+	struct device_node *child;
+	int rc = 0;
+	const char *name;
+	struct property *prop;
+
+	for_each_available_child_of_node(node, child) {
+		of_property_for_each_string(child, "interrupt-names",
+					    prop, name) {
+			rc = smb138x_request_interrupt(chip, child, name);
+			if (rc < 0) {
+				pr_err("Couldn't request interrupt %s rc=%d\n",
+				       name, rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/*********
+ * PROBE *
+ *********/
+
+static int smb138x_master_probe(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	chg->param = v1_params;
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Couldn't initialize smblib rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_vbus_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vbus regulator rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_init_vconn_regulator(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize vconn regulator rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_init_usb_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_batt_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize batt psy rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_init_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = smb138x_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = smb138x_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int smb138x_slave_probe(struct smb138x *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+	int rc = 0;
+
+	chg->param = v1_params;
+
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Couldn't initialize smblib rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	chg->iio.temp_max_chan = iio_channel_get(chg->dev, "charger_temp_max");
+	if (IS_ERR(chg->iio.temp_max_chan)) {
+		rc = PTR_ERR(chg->iio.temp_max_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr1_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr1");
+	if (IS_ERR(chg->iio.connector_temp_thr1_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr1_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr2_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr2");
+	if (IS_ERR(chg->iio.connector_temp_thr2_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr2_chan);
+		goto cleanup;
+	}
+
+	chg->iio.connector_temp_thr3_chan = iio_channel_get(chg->dev,
+							"connector_temp_thr3");
+	if (IS_ERR(chg->iio.connector_temp_thr3_chan)) {
+		rc = PTR_ERR(chg->iio.connector_temp_thr3_chan);
+		goto cleanup;
+	}
+
+	rc = smb138x_parse_dt(chip);
+	if (rc < 0) {
+		pr_err("Couldn't parse device tree rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_init_slave_hw(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize hardware rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_init_parallel_psy(chip);
+	if (rc < 0) {
+		pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_determine_initial_slave_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	rc = smb138x_request_interrupts(chip);
+	if (rc < 0) {
+		pr_err("Couldn't request interrupts rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	return rc;
+
+cleanup:
+	smblib_deinit(chg);
+	if (chip->parallel_psy)
+		power_supply_unregister(chip->parallel_psy);
+	return rc;
+}
+
+static const struct of_device_id match_table[] = {
+	{
+		.compatible = "qcom,smb138x-charger",
+		.data = (void *) PARALLEL_MASTER
+	},
+	{
+		.compatible = "qcom,smb138x-parallel-slave",
+		.data = (void *) PARALLEL_SLAVE
+	},
+	{ },
+};
+
+static int smb138x_probe(struct platform_device *pdev)
+{
+	struct smb138x *chip;
+	const struct of_device_id *id;
+	int rc = 0;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->chg.dev = &pdev->dev;
+	chip->chg.debug_mask = &__debug_mask;
+	chip->chg.irq_info = smb138x_irqs;
+	chip->chg.name = "SMB";
+
+	chip->chg.regmap = dev_get_regmap(chip->chg.dev->parent, NULL);
+	if (!chip->chg.regmap) {
+		pr_err("parent regmap is missing\n");
+		return -EINVAL;
+	}
+
+	id = of_match_device(of_match_ptr(match_table), chip->chg.dev);
+	if (!id) {
+		pr_err("Couldn't find a matching device\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, chip);
+
+	rc = smb138x_setup_wa_flags(chip);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't setup wa flags rc = %d\n", rc);
+		return rc;
+	}
+
+	chip->chg.mode = (enum smb_mode) id->data;
+	switch (chip->chg.mode) {
+	case PARALLEL_MASTER:
+		rc = smb138x_master_probe(chip);
+		break;
+	case PARALLEL_SLAVE:
+		rc = smb138x_slave_probe(chip);
+		break;
+	default:
+		pr_err("Couldn't find a matching mode %d\n", chip->chg.mode);
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("Couldn't probe SMB138X rc=%d\n", rc);
+		goto cleanup;
+	}
+
+	pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode);
+	return rc;
+
+cleanup:
+	platform_set_drvdata(pdev, NULL);
+	return rc;
+}
+
+static int smb138x_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct platform_driver smb138x_driver = {
+	.driver	= {
+		.name		= "qcom,smb138x-charger",
+		.owner		= THIS_MODULE,
+		.of_match_table	= match_table,
+	},
+	.probe	= smb138x_probe,
+	.remove	= smb138x_remove,
+};
+module_platform_driver(smb138x_driver);
+
+MODULE_DESCRIPTION("QPNP SMB138X Charger Driver");
+MODULE_LICENSE("GPL v2");
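For reference, a minimal sketch of how another kernel driver could read back one of the properties exposed by the "parallel" supply registered above, using the generic power-supply API; the example_* function is hypothetical, and only the supply name and property come from this driver.

/* Hypothetical consumer of the "parallel" supply registered by smb138x. */
#include <linux/power_supply.h>

static int example_read_parallel_charger_temp(int *temp)
{
	struct power_supply *pl_psy;
	union power_supply_propval val = {0, };
	int rc;

	pl_psy = power_supply_get_by_name("parallel");
	if (!pl_psy)
		return -ENODEV;

	/* dispatched to smb138x_get_prop_charger_temp() by the get_prop hook */
	rc = power_supply_get_property(pl_psy, POWER_SUPPLY_PROP_CHARGER_TEMP,
				       &val);
	if (!rc)
		*temp = val.intval;

	power_supply_put(pl_psy);
	return rc;
}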
diff --git a/drivers/power/supply/qcom/storm-watch.c b/drivers/power/supply/qcom/storm-watch.c
new file mode 100644
index 0000000..5275079
--- /dev/null
+++ b/drivers/power/supply/qcom/storm-watch.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "storm-watch.h"
+
+/**
+ * is_storming(): Check if an event is storming
+ *
+ * @data: Data for tracking an event storm
+ *
+ * The return value will be true if a storm has been detected and
+ * false if a storm was not detected.
+ */
+bool is_storming(struct storm_watch *data)
+{
+	ktime_t curr_kt, delta_kt;
+	bool is_storming = false;
+
+	if (!data)
+		return false;
+
+	if (!data->enabled)
+		return false;
+
+	/* max storm count must be greater than 0 */
+	if (data->max_storm_count <= 0)
+		return false;
+
+	/* the period threshold must be greater than 0ms */
+	if (data->storm_period_ms <= 0)
+		return false;
+
+	mutex_lock(&data->storm_lock);
+	curr_kt = ktime_get_boottime();
+	delta_kt = ktime_sub(curr_kt, data->last_kt);
+
+	if (ktime_to_ms(delta_kt) < data->storm_period_ms)
+		data->storm_count++;
+	else
+		data->storm_count = 0;
+
+	if (data->storm_count > data->max_storm_count) {
+		is_storming = true;
+		data->storm_count = 0;
+	}
+
+	data->last_kt = curr_kt;
+	mutex_unlock(&data->storm_lock);
+	return is_storming;
+}
+
+void reset_storm_count(struct storm_watch *data)
+{
+	mutex_lock(&data->storm_lock);
+	data->storm_count = 0;
+	mutex_unlock(&data->storm_lock);
+}
diff --git a/drivers/power/supply/qcom/storm-watch.h b/drivers/power/supply/qcom/storm-watch.h
new file mode 100644
index 0000000..ff05c4a
--- /dev/null
+++ b/drivers/power/supply/qcom/storm-watch.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STORM_WATCH_H
+#define __STORM_WATCH_H
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+
+/**
+ * Data used to track an event storm.
+ *
+ * @storm_period_ms: The maximum time interval between two events. If this limit
+ *                   is exceeded then the event chain will be broken and removed
+ *                   from consideration for a storm.
+ * @max_storm_count: The number of chained events required to trigger a storm.
+ * @storm_count:     The current number of chained events.
+ * @last_kt:         Kernel time of the last event seen.
+ * @storm_lock:      Mutex lock to protect storm_watch data.
+ */
+struct storm_watch {
+	bool		enabled;
+	int		storm_period_ms;
+	int		max_storm_count;
+	int		storm_count;
+	ktime_t		last_kt;
+	struct mutex	storm_lock;
+};
+
+bool is_storming(struct storm_watch *data);
+void reset_storm_count(struct storm_watch *data);
+#endif
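For reference, an illustrative use of the storm_watch API declared above, similar to how the smb138x code earlier in this patch wires storm_data into its threaded IRQ handlers: the watch is initialized once, then polled from a threaded handler (is_storming() takes a mutex, so it must not be called from hard-IRQ context). All example_* names and thresholds are hypothetical.

#include <linux/interrupt.h>
#include "storm-watch.h"

static struct storm_watch example_watch;

static void example_watch_init(void)
{
	mutex_init(&example_watch.storm_lock);
	example_watch.enabled = true;
	example_watch.storm_period_ms = 100;	/* events chain if < 100 ms apart */
	example_watch.max_storm_count = 5;	/* more than 5 chained events = storm */
}

/* registered as the thread fn of request_threaded_irq(), so sleeping is ok */
static irqreturn_t example_threaded_handler(int irq, void *data)
{
	if (is_storming(&example_watch)) {
		/* throttle the source, e.g. disable_irq_nosync(irq) */
		return IRQ_HANDLED;
	}

	/* ... normal handling ... */
	return IRQ_HANDLED;
}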
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f9b6fba..a530f08 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@
 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
 	task->state = state;
 
-	if (!list_empty(&task->running))
+	spin_lock_bh(&conn->taskqueuelock);
+	if (!list_empty(&task->running)) {
+		pr_debug_once("%s while task on list\n", __func__);
 		list_del_init(&task->running);
+	}
+	spin_unlock_bh(&conn->taskqueuelock);
 
 	if (conn->task == task)
 		conn->task = NULL;
@@ -783,7 +787,9 @@
 		if (session->tt->xmit_task(task))
 			goto free_task;
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->mgmtqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -1474,8 +1480,10 @@
 	 * this may be on the requeue list already if the xmit_task callout
 	 * is handling the r2ts while we are adding new ones
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 	if (list_empty(&task->running))
 		list_add_tail(&task->running, &conn->requeue);
+	spin_unlock_bh(&conn->taskqueuelock);
 	iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@
 	 * only have one nop-out as a ping from us and targets should not
 	 * overflow us with nop-ins
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
 		conn->task = list_entry(conn->mgmtqueue.next,
 					 struct iscsi_task, running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (iscsi_prep_mgmt_task(conn, conn->task)) {
 			/* regular RX path uses back_lock */
 			spin_lock_bh(&conn->session->back_lock);
 			__iscsi_put_task(conn->task);
 			spin_unlock_bh(&conn->session->back_lock);
 			conn->task = NULL;
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 	}
 
 	/* process pending command queue */
@@ -1535,19 +1547,24 @@
 		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
 					running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
 			fail_scsi_task(conn->task, DID_IMM_RETRY);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
 		if (rc) {
 			if (rc == -ENOMEM || rc == -EACCES) {
+				spin_lock_bh(&conn->taskqueuelock);
 				list_add_tail(&conn->task->running,
 					      &conn->cmdqueue);
 				conn->task = NULL;
+				spin_unlock_bh(&conn->taskqueuelock);
 				goto done;
 			} else
 				fail_scsi_task(conn->task, DID_ABORT);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
@@ -1558,6 +1575,7 @@
 		 * we need to check the mgmt queue for nops that need to
 		 * be sent to aviod starvation
 		 */
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
@@ -1577,12 +1595,15 @@
 		conn->task = task;
 		list_del_init(&conn->task->running);
 		conn->task->state = ISCSI_TASK_RUNNING;
+		spin_unlock_bh(&conn->taskqueuelock);
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
+	spin_unlock_bh(&conn->taskqueuelock);
 	spin_unlock_bh(&conn->session->frwd_lock);
 	return -ENODATA;
 
@@ -1738,7 +1759,9 @@
 			goto prepd_reject;
 		}
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->cmdqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -2897,6 +2920,7 @@
 	INIT_LIST_HEAD(&conn->mgmtqueue);
 	INIT_LIST_HEAD(&conn->cmdqueue);
 	INIT_LIST_HEAD(&conn->requeue);
+	spin_lock_init(&conn->taskqueuelock);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_task used for the login/text sequences */
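The libiscsi hunks above serialize every manipulation of task->running and of the mgmtqueue/cmdqueue/requeue lists under the new conn->taskqueuelock. A generic sketch of that list-under-spinlock pattern follows; the example_* types are illustrative, not libiscsi structures.

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_conn {
	spinlock_t		taskqueuelock;	/* protects the queues below */
	struct list_head	cmdqueue;
};

struct example_task {
	struct list_head	running;
};

static void example_queue_task(struct example_conn *conn,
			       struct example_task *task)
{
	spin_lock_bh(&conn->taskqueuelock);
	/* list_empty() and list_add_tail() must both happen under the lock */
	if (list_empty(&task->running))
		list_add_tail(&task->running, &conn->cmdqueue);
	spin_unlock_bh(&conn->taskqueuelock);
}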
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a042..f7e3f27 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11393,6 +11393,7 @@
 	.id_table	= lpfc_id_table,
 	.probe		= lpfc_pci_probe_one,
 	.remove		= lpfc_pci_remove_one,
+	.shutdown	= lpfc_pci_remove_one,
 	.suspend        = lpfc_pci_suspend_one,
 	.resume		= lpfc_pci_resume_one,
 	.err_handler    = &lpfc_err_handler,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689..feab7ea 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5375,16 +5375,22 @@
 
 static int
 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
-	struct atio_from_isp *atio)
+	struct atio_from_isp *atio, bool ha_locked)
 {
 	struct qla_hw_data *ha = vha->hw;
 	uint16_t status;
+	unsigned long flags;
 
 	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
 		return 0;
 
+	if (!ha_locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
 	status = temp_sam_status;
 	qlt_send_busy(vha, atio, status);
+	if (!ha_locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	return 1;
 }
 
@@ -5429,7 +5435,7 @@
 
 
 		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
-			rc = qlt_chk_qfull_thresh_hold(vha, atio);
+			rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
 			if (rc != 0) {
 				tgt->atio_irq_cmd_count--;
 				return;
@@ -5552,7 +5558,7 @@
 			break;
 		}
 
-		rc = qlt_chk_qfull_thresh_hold(vha, atio);
+		rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
 		if (rc != 0) {
 			tgt->irq_cmd_count--;
 			return;
@@ -6794,6 +6800,8 @@
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	kfree(op);
 }
 
 void
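The qla2xxx change above threads a caller-supplied ha_locked flag into qlt_chk_qfull_thresh_hold() so the helper only acquires hardware_lock when the caller does not already hold it. A generic sketch of that conditional-locking pattern, with illustrative names:

#include <linux/spinlock.h>

static void example_send_busy(spinlock_t *hw_lock, bool caller_holds_lock)
{
	unsigned long flags = 0;

	if (!caller_holds_lock)
		spin_lock_irqsave(hw_lock, flags);

	/* ... work that must run with the hardware lock held ... */

	if (!caller_holds_lock)
		spin_unlock_irqrestore(hw_lock, flags);
}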
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 5633e1f..266c0a2 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -213,6 +213,7 @@
 	bool tx_blocked_signal_sent;
 	struct kthread_work kwork;
 	struct kthread_worker kworker;
+	struct work_struct wakeup_work;
 	struct task_struct *task;
 	struct tasklet_struct tasklet;
 	struct srcu_struct use_ref;
@@ -874,20 +875,10 @@
 		srcu_read_unlock(&einfo->use_ref, rcu_id);
 		return;
 	}
-	if (!atomic_ctx) {
-		if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
-			einfo->tx_resume_needed = false;
-			einfo->xprt_if.glink_core_if_ptr->tx_resume(
-							&einfo->xprt_if);
-		}
-		spin_lock_irqsave(&einfo->write_lock, flags);
-		if (einfo->tx_blocked_signal_sent) {
-			wake_up_all(&einfo->tx_blocked_queue);
-			einfo->tx_blocked_signal_sent = false;
-		}
-		spin_unlock_irqrestore(&einfo->write_lock, flags);
-	}
 
+	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
+		(waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting ?*/
+		schedule_work(&einfo->wakeup_work);
 
 	/*
 	 * Access to the fifo needs to be synchronized, however only the calls
@@ -1195,6 +1186,39 @@
 }
 
 /**
+ * tx_wakeup_worker() - worker function to wakeup tx blocked thread
+ * @work:	kwork associated with the edge to process commands on.
+ */
+static void tx_wakeup_worker(struct work_struct *work)
+{
+	struct edge_info *einfo;
+	bool trigger_wakeup = false;
+	unsigned long flags;
+	int rcu_id;
+
+	einfo = container_of(work, struct edge_info, wakeup_work);
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+	if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+		einfo->tx_resume_needed = false;
+		einfo->xprt_if.glink_core_if_ptr->tx_resume(
+						&einfo->xprt_if);
+	}
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting ?*/
+		einfo->tx_blocked_signal_sent = false;
+		trigger_wakeup = true;
+	}
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+	if (trigger_wakeup)
+		wake_up_all(&einfo->tx_blocked_queue);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
  * rx_worker() - worker function to process received commands
  * @work:	kwork associated with the edge to process commands on.
  */
@@ -2303,6 +2327,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->read_from_fifo = read_from_fifo;
 	einfo->write_to_fifo = write_to_fifo;
@@ -2402,6 +2427,7 @@
 reg_xprt_fail:
 smem_alloc_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
@@ -2489,6 +2515,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->intentless = true;
 	einfo->read_from_fifo = memcpy32_fromio;
@@ -2649,6 +2676,7 @@
 reg_xprt_fail:
 toc_init_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
@@ -2780,6 +2808,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->read_from_fifo = read_from_fifo;
 	einfo->write_to_fifo = write_to_fifo;
@@ -2900,6 +2929,7 @@
 reg_xprt_fail:
 smem_alloc_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
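The glink_smem_native_xprt change above moves the tx-resume/wake-up handling out of the atomic rx path into a dedicated work item (wakeup_work), initialized alongside the kthread worker and flushed on the error paths. A minimal sketch of that defer-to-workqueue pattern follows; the example_* names are illustrative, not driver code.

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct example_edge {
	struct work_struct	wakeup_work;
	wait_queue_head_t	tx_blocked_queue;
};

static void example_wakeup_worker(struct work_struct *work)
{
	struct example_edge *edge =
		container_of(work, struct example_edge, wakeup_work);

	/* process context: callbacks that may sleep can be run from here */
	wake_up_all(&edge->tx_blocked_queue);
}

static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct example_edge *edge = data;

	/* too heavy for atomic context, hand it off to the worker */
	schedule_work(&edge->wakeup_work);
	return IRQ_HANDLED;
}

static void example_edge_init(struct example_edge *edge)
{
	init_waitqueue_head(&edge->tx_blocked_queue);
	INIT_WORK(&edge->wakeup_work, example_wakeup_worker);
}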
diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c
index 9a9d73b..458e39d 100644
--- a/drivers/soc/qcom/ipc_router_glink_xprt.c
+++ b/drivers/soc/qcom/ipc_router_glink_xprt.c
@@ -43,8 +43,14 @@
 #define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
 #define IPC_RTR_XPRT_NAME_LEN (2 * GLINK_NAME_SIZE)
 #define PIL_SUBSYSTEM_NAME_LEN 32
-#define DEFAULT_NUM_INTENTS 5
-#define DEFAULT_RX_INTENT_SIZE 2048
+
+#define MAX_NUM_LO_INTENTS 5
+#define MAX_NUM_MD_INTENTS 3
+#define MAX_NUM_HI_INTENTS 2
+#define LO_RX_INTENT_SIZE 2048
+#define MD_RX_INTENT_SIZE 8192
+#define HI_RX_INTENT_SIZE (17 * 1024)
+
 /**
  * ipc_router_glink_xprt - IPC Router's GLINK XPRT structure
  * @list: IPC router's GLINK XPRT list.
@@ -82,6 +88,9 @@
 	unsigned int xprt_version;
 	unsigned int xprt_option;
 	bool disable_pil_loading;
+	uint32_t cur_lo_intents_cnt;
+	uint32_t cur_md_intents_cnt;
+	uint32_t cur_hi_intents_cnt;
 };
 
 struct ipc_router_glink_xprt_work {
@@ -342,7 +351,7 @@
 	}
 
 	D("%s %zu bytes @ %p\n", __func__, rx_work->iovec_size, rx_work->iovec);
-	if (rx_work->iovec_size <= DEFAULT_RX_INTENT_SIZE)
+	if (rx_work->iovec_size <= HI_RX_INTENT_SIZE)
 		reuse_intent = true;
 
 	pkt = glink_xprt_copy_data(rx_work);
@@ -371,9 +380,14 @@
 				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
 	D("%s: Notified IPC Router of %s OPEN\n",
 	  __func__, glink_xprtp->xprt.name);
-	for (i = 0; i < DEFAULT_NUM_INTENTS; i++)
+	glink_xprtp->cur_lo_intents_cnt = 0;
+	glink_xprtp->cur_md_intents_cnt = 0;
+	glink_xprtp->cur_hi_intents_cnt = 0;
+	for (i = 0; i < MAX_NUM_LO_INTENTS; i++) {
 		glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
-				      DEFAULT_RX_INTENT_SIZE);
+				      LO_RX_INTENT_SIZE);
+		glink_xprtp->cur_lo_intents_cnt++;
+	}
 	kfree(xprt_work);
 }
 
@@ -394,13 +408,32 @@
 
 static void glink_xprt_qrx_intent_worker(struct work_struct *work)
 {
+	size_t sz;
 	struct queue_rx_intent_work *qrx_intent_work =
 		container_of(work, struct queue_rx_intent_work, work);
 	struct ipc_router_glink_xprt *glink_xprtp =
 					qrx_intent_work->glink_xprtp;
+	uint32_t *cnt = NULL;
+	int ret;
 
-	glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
-			      qrx_intent_work->intent_size);
+	sz = qrx_intent_work->intent_size;
+	if (sz <= MD_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_md_intents_cnt >= MAX_NUM_MD_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = MD_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_md_intents_cnt;
+	} else if (sz <= HI_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_hi_intents_cnt >= MAX_NUM_HI_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = HI_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_hi_intents_cnt;
+	}
+
+	ret = glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+					sz);
+	if (!ret && cnt)
+		(*cnt)++;
+qrx_intent_worker_out:
 	kfree(qrx_intent_work);
 }
 
@@ -470,7 +503,7 @@
 	struct ipc_router_glink_xprt *glink_xprtp =
 		(struct ipc_router_glink_xprt *)priv;
 
-	if (sz <= DEFAULT_RX_INTENT_SIZE)
+	if (sz <= LO_RX_INTENT_SIZE)
 		return true;
 
 	qrx_intent_work = kmalloc(sizeof(struct queue_rx_intent_work),
diff --git a/drivers/soc/qcom/llcc-core.c b/drivers/soc/qcom/llcc-core.c
index 2c9d0a0..fc88c71 100644
--- a/drivers/soc/qcom/llcc-core.c
+++ b/drivers/soc/qcom/llcc-core.c
@@ -20,7 +20,6 @@
 #include <linux/regmap.h>
 
 /* Config registers offsets*/
-#define COMMON_CFG0		0x00030004
 #define DRP_ECC_ERROR_CFG	0x00040000
 
 /* TRP, DRP interrupt register offsets */
@@ -29,7 +28,6 @@
 #define TRP_INTERRUPT_0_ENABLE		0x00020488
 #define DRP_INTERRUPT_ENABLE		0x0004100C
 
-#define DATA_RAM_ECC_ENABLE	0x1
 #define SB_ERROR_THRESHOLD	0x1
 #define SB_ERROR_THRESHOLD_SHIFT	24
 #define SB_DB_TRP_INTERRUPT_ENABLE	0x3
@@ -50,10 +48,6 @@
 	regmap_update_bits(llcc_regmap, TRP_INTERRUPT_0_ENABLE,
 		SB_DB_TRP_INTERRUPT_ENABLE, SB_DB_TRP_INTERRUPT_ENABLE);
 
-	/* Enable ECC for for data ram */
-	regmap_update_bits(llcc_regmap, COMMON_CFG0,
-				DATA_RAM_ECC_ENABLE, DATA_RAM_ECC_ENABLE);
-
 	/* Enable SB error for Data RAM */
 	sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
 	regmap_write(llcc_regmap, DRP_ECC_ERROR_CFG, sb_err_threshold);
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 501b902..77c2ae6 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -49,8 +49,6 @@
 #define LLCC_TRP_STATUSn(n)   (4 + n * 0x1000)
 #define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + 0x8 * n)
 #define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + 0x8 * n)
-#define LLCC_TRP_PCB_ACT       0x23204
-#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x23200
 
 /**
  * Driver data for llcc
@@ -320,8 +318,6 @@
 	u32 attr0_cfg;
 	u32 attr1_val;
 	u32 attr0_val;
-	u32 pcb = 0;
-	u32 cad = 0;
 	u32 max_cap_cacheline;
 	u32 sz;
 	const struct llcc_slice_config *llcc_table;
@@ -358,14 +354,6 @@
 		regmap_write(drv->llcc_map, attr1_cfg, attr1_val);
 		regmap_write(drv->llcc_map, attr0_cfg, attr0_val);
 
-		/* Write the retain on power collapse bit for each scid */
-		pcb |= llcc_table[i].retain_on_pc << llcc_table[i].slice_id;
-		regmap_write(drv->llcc_map, LLCC_TRP_PCB_ACT, pcb);
-
-		/* Disable capacity alloc */
-		cad |= llcc_table[i].dis_cap_alloc << llcc_table[i].slice_id;
-		regmap_write(drv->llcc_map, LLCC_TRP_SCID_DIS_CAP_ALLOC, cad);
-
 		/* Make sure that the SCT is programmed before activating */
 		mb();
 
diff --git a/drivers/soc/qcom/msm-core.c b/drivers/soc/qcom/msm-core.c
index 5a0b261..fa5c6b2 100644
--- a/drivers/soc/qcom/msm-core.c
+++ b/drivers/soc/qcom/msm-core.c
@@ -402,9 +402,9 @@
 		return -EINVAL;
 
 	get_user(cluster, &argp->cluster);
-	mpidr = (argp->cluster << (MAX_CORES_PER_CLUSTER *
+	mpidr = (cluster << (MAX_CORES_PER_CLUSTER *
 			MAX_NUM_OF_CLUSTERS));
-	cpumask = argp->cpumask;
+	get_user(cpumask, &argp->cpumask);
 
 	switch (cmd) {
 	case EA_LEAKAGE:
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index e2ad422..1f5cfc0 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -17,7 +17,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-//#include <soc/qcom/rpm-smd.h>
 #include <soc/qcom/cmd-db.h>
 #include <soc/qcom/rpmh.h>
 #include <soc/qcom/tcs.h>
@@ -36,11 +35,6 @@
 #define BCM_TCS_CMD_VOTE_Y_SHFT		0
 #define BCM_TCS_CMD_VOTE_Y_MASK		0xFFFC000
 
-#define VCD_MAX_CNT			10
-
-#define RSC_HLOS_DRV_ID			2
-#define RSC_DISP_DRV_ID			0
-
 #define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
 	(((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\
 	((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\
@@ -49,10 +43,8 @@
 
 static int msm_bus_dev_init_qos(struct device *dev, void *data);
 
-struct list_head bcm_clist_inorder[VCD_MAX_CNT];
-struct list_head bcm_query_list_inorder[VCD_MAX_CNT];
-static struct rpmh_client *mbox_apps;
-static struct rpmh_client *mbox_disp;
+static struct list_head bcm_query_list_inorder[VCD_MAX_CNT];
+static struct msm_bus_node_device_type *cur_rsc;
 
 struct bcm_db {
 	uint32_t unit_size;
@@ -304,7 +296,8 @@
 				int *n_sleep,
 				struct tcs_cmd *cmdlist_active,
 				struct tcs_cmd *cmdlist_wake,
-				struct tcs_cmd *cmdlist_sleep)
+				struct tcs_cmd *cmdlist_sleep,
+				struct list_head *cur_bcm_clist)
 {
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 	int i = 0;
@@ -318,13 +311,13 @@
 
 	for (i = 0; i < VCD_MAX_CNT; i++) {
 		last_tcs = -1;
-		if (list_empty(&bcm_clist_inorder[i]))
+		if (list_empty(&cur_bcm_clist[i]))
 			continue;
-		list_for_each_entry(cur_bcm, &bcm_clist_inorder[i], link) {
-			if (cur_bcm->dirty) {
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
+			if (cur_bcm->updated) {
 				if (last_tcs != -1 &&
 					list_is_last(&cur_bcm->link,
-						&bcm_clist_inorder[i])) {
+						&cur_bcm_clist[i])) {
 					cmdlist_active[last_tcs].data |=
 						BCM_TCS_CMD_COMMIT_MASK;
 					cmdlist_active[last_tcs].complete
@@ -335,7 +328,7 @@
 			n_active[idx]++;
 			commit = false;
 			if (list_is_last(&cur_bcm->link,
-						&bcm_clist_inorder[i])) {
+						&cur_bcm_clist[i])) {
 				commit = true;
 				idx++;
 			}
@@ -344,7 +337,7 @@
 				cur_bcm->node_bw[ACTIVE_CTX].max_ab, commit);
 			k++;
 			last_tcs = k;
-			cur_bcm->dirty = true;
+			cur_bcm->updated = true;
 		}
 	}
 
@@ -355,9 +348,9 @@
 	idx = 0;
 	for (i = 0; i < VCD_MAX_CNT; i++) {
 		last_tcs = -1;
-		if (list_empty(&bcm_clist_inorder[i]))
+		if (list_empty(&cur_bcm_clist[i]))
 			continue;
-		list_for_each_entry(cur_bcm, &bcm_clist_inorder[i], link) {
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
 			commit = false;
 			if ((cur_bcm->node_bw[DUAL_CTX].max_ab ==
 				cur_bcm->node_bw[ACTIVE_CTX].max_ab) &&
@@ -365,7 +358,7 @@
 				cur_bcm->node_bw[ACTIVE_CTX].max_ib)) {
 				if (last_tcs != -1 &&
 					list_is_last(&cur_bcm->link,
-					&bcm_clist_inorder[i])) {
+					&cur_bcm_clist[i])) {
 					cmdlist_wake[k].data |=
 						BCM_TCS_CMD_COMMIT_MASK;
 					cmdlist_sleep[k].data |=
@@ -379,7 +372,7 @@
 			last_tcs = k;
 			n_sleep[idx]++;
 			if (list_is_last(&cur_bcm->link,
-						&bcm_clist_inorder[i])) {
+						&cur_bcm_clist[i])) {
 				commit = true;
 				idx++;
 			}
@@ -445,10 +438,18 @@
 	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
 	cur_vcd = cur_bcm->bcmdev->clk_domain;
 
-	if (!cur_bcm->dirty)
-		list_add_tail(&cur_bcm->link, &bcm_clist_inorder[cur_vcd]);
-	else
-		cur_bcm->dirty = false;
+	if (!cur_bcm->node_info->num_rsc_devs)
+		goto exit_bcm_clist_add;
+
+	if (!cur_rsc)
+		cur_rsc = to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+
+	if (!cur_bcm->dirty) {
+		list_add_tail(&cur_bcm->link,
+					&cur_rsc->rscdev->bcm_clist[cur_vcd]);
+		cur_bcm->dirty = true;
+	}
+	cur_bcm->updated = false;
 
 exit_bcm_clist_add:
 	return ret;
@@ -480,7 +481,7 @@
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
-		goto exit_bcm_clist_add;
+		goto exit_bcm_clist_clean;
 
 	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
 
@@ -492,7 +493,7 @@
 		list_del_init(&cur_bcm->link);
 	}
 
-exit_bcm_clist_add:
+exit_bcm_clist_clean:
 	return ret;
 }
 
@@ -506,7 +507,6 @@
 
 	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
 
-	MSM_BUS_ERR("%s: removing bcm %d\n", __func__, cur_bcm->node_info->id);
 	cur_bcm->query_dirty = false;
 	list_del_init(&cur_bcm->query_link);
 
@@ -525,6 +525,7 @@
 	struct tcs_cmd *cmdlist_wake = NULL;
 	struct tcs_cmd *cmdlist_sleep = NULL;
 	struct rpmh_client *cur_mbox = NULL;
+	struct list_head *cur_bcm_clist = NULL;
 	int *n_active = NULL;
 	int *n_wake = NULL;
 	int *n_sleep = NULL;
@@ -534,6 +535,7 @@
 	int cnt_sleep = 0;
 	int i = 0;
 
+
 	list_for_each_entry_safe(node, node_tmp, clist, link) {
 		if (unlikely(node->node_info->defer_qos))
 			msm_bus_dev_init_qos(&node->dev, NULL);
@@ -541,10 +543,13 @@
 		bcm_clist_add(node);
 	}
 
+	cur_mbox = cur_rsc->rscdev->mbox;
+	cur_bcm_clist = cur_rsc->rscdev->bcm_clist;
+
 	for (i = 0; i < VCD_MAX_CNT; i++) {
-		if (list_empty(&bcm_clist_inorder[i]))
+		if (list_empty(&cur_bcm_clist[i]))
 			continue;
-		list_for_each_entry(cur_bcm, &bcm_clist_inorder[i], link) {
+		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
 			if ((cur_bcm->node_bw[DUAL_CTX].max_ab !=
 				cur_bcm->node_bw[ACTIVE_CTX].max_ab) ||
 				(cur_bcm->node_bw[DUAL_CTX].max_ib !=
@@ -552,18 +557,13 @@
 				cnt_sleep++;
 				cnt_wake++;
 			}
-			if (!cur_bcm->dirty)
+			if (!cur_bcm->updated)
 				cnt_active++;
-			if (!cur_mbox) {
-				if (cur_bcm->bcmdev->drv_id == RSC_HLOS_DRV_ID)
-					cur_mbox = mbox_apps;
-				else
-					cur_mbox = mbox_disp;
-			}
 		}
 		cnt_vcd++;
 	}
 
+	MSM_BUS_ERR("%s: cmd_gen\n", __func__);
 	n_active = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
 	n_wake = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
 	n_sleep = kcalloc(cnt_vcd+1, sizeof(int), GFP_KERNEL);
@@ -577,17 +577,13 @@
 								GFP_KERNEL);
 	}
 	bcm_cnt = tcs_cmd_list_gen(n_active, n_wake, n_sleep, cmdlist_active,
-					cmdlist_wake, cmdlist_sleep);
+				cmdlist_wake, cmdlist_sleep, cur_bcm_clist);
 
 	ret = rpmh_invalidate(cur_mbox);
-
-	ret = rpmh_write_passthru(cur_mbox, RPMH_ACTIVE_ONLY_STATE,
+	ret = rpmh_write_passthru(cur_mbox, cur_rsc->rscdev->req_state,
 						cmdlist_active, n_active);
-	if (cur_mbox == mbox_apps)
-		ret = rpmh_write_passthru(cur_mbox, RPMH_WAKE_ONLY_STATE,
-						cmdlist_wake, n_wake);
-	else
-		ret = rpmh_write_passthru(cur_mbox, RPMH_AWAKE_STATE,
+
+	ret = rpmh_write_passthru(cur_mbox, RPMH_WAKE_ONLY_STATE,
 						cmdlist_wake, n_wake);
 
 	ret = rpmh_write_passthru(cur_mbox, RPMH_SLEEP_STATE,
@@ -599,6 +595,7 @@
 		list_del_init(&node->link);
 	}
 
+	cur_rsc = NULL;
 	kfree(cmdlist_active);
 	kfree(cmdlist_wake);
 	kfree(cmdlist_sleep);
@@ -950,7 +947,6 @@
 
 	// Add way to count # of VCDs, initialize LL
 	for (i = 0; i < VCD_MAX_CNT; i++) {
-		INIT_LIST_HEAD(&bcm_clist_inorder[i]);
 		INIT_LIST_HEAD(&bcm_query_list_inorder[i]);
 	}
 
@@ -958,7 +954,45 @@
 	return ret;
 }
 
+static int msm_bus_rsc_init(struct platform_device *pdev,
+			struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_rsc_device_type *rscdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+	int i = 0;
 
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		ret = -ENXIO;
+		goto exit_rsc_init;
+	}
+
+	rscdev = devm_kzalloc(dev, sizeof(struct msm_bus_rsc_device_type),
+								GFP_KERNEL);
+	if (!rscdev) {
+		ret = -ENOMEM;
+		goto exit_rsc_init;
+	}
+
+	node_dev->rscdev = rscdev;
+	rscdev->req_state = pdata->rscdev->req_state;
+	rscdev->mbox = rpmh_get_byname(pdev, node_dev->node_info->name);
+
+	if (IS_ERR_OR_NULL(rscdev->mbox)) {
+		MSM_BUS_ERR("%s: Failed to get mbox:%s", __func__,
+						node_dev->node_info->name);
+		return PTR_ERR(rscdev->mbox);
+	}
+
+	// Add way to count # of VCDs, initialize LL
+	for (i = 0; i < VCD_MAX_CNT; i++)
+		INIT_LIST_HEAD(&rscdev->bcm_clist[i]);
+
+exit_rsc_init:
+	return ret;
+}
 
 static int msm_bus_init_clk(struct device *bus_dev,
 				struct msm_bus_node_device_type *pdata)
@@ -1059,10 +1093,12 @@
 	node_info->num_connections = pdata_node_info->num_connections;
 	node_info->num_blist = pdata_node_info->num_blist;
 	node_info->num_bcm_devs = pdata_node_info->num_bcm_devs;
+	node_info->num_rsc_devs = pdata_node_info->num_rsc_devs;
 	node_info->num_qports = pdata_node_info->num_qports;
 	node_info->virt_dev = pdata_node_info->virt_dev;
 	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
 	node_info->is_bcm_dev = pdata_node_info->is_bcm_dev;
+	node_info->is_rsc_dev = pdata_node_info->is_rsc_dev;
 	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
 	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
 	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
@@ -1176,6 +1212,30 @@
 		pdata_node_info->bcm_dev_ids,
 		sizeof(int) * pdata_node_info->num_bcm_devs);
 
+	node_info->rsc_devs = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_rsc_devs,
+			GFP_KERNEL);
+	if (!node_info->rsc_devs) {
+		MSM_BUS_ERR("%s:rsc dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->rsc_dev_ids = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_rsc_devs,
+			GFP_KERNEL);
+	if (!node_info->rsc_dev_ids) {
+		MSM_BUS_ERR("%s:rsc dev ids alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->rsc_devs);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->rsc_dev_ids,
+		pdata_node_info->rsc_dev_ids,
+		sizeof(int) * pdata_node_info->num_rsc_devs);
+
 	node_info->qport = devm_kzalloc(bus_dev,
 			sizeof(int) * pdata_node_info->num_qports,
 			GFP_KERNEL);
@@ -1268,6 +1328,7 @@
 {
 	struct msm_bus_node_device_type *bus_node = NULL;
 	struct msm_bus_node_device_type *bcm_node = NULL;
+	struct msm_bus_node_device_type *rsc_node = NULL;
 	int ret = 0;
 	int j;
 	struct msm_bus_node_device_type *fab;
@@ -1281,7 +1342,8 @@
 
 	/* Setup parent bus device for this node */
 	if (!bus_node->node_info->is_fab_dev &&
-		!bus_node->node_info->is_bcm_dev) {
+		!bus_node->node_info->is_bcm_dev &&
+		!bus_node->node_info->is_rsc_dev) {
 		struct device *bus_parent_device =
 			bus_find_device(&msm_bus_type, NULL,
 				(void *)&bus_node->node_info->bus_device_id,
@@ -1351,6 +1413,22 @@
 		bcm_node->bcmdev->num_bus_devs++;
 	}
 
+	for (j = 0; j < bus_node->node_info->num_rsc_devs; j++) {
+		bus_node->node_info->rsc_devs[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->rsc_dev_ids[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->rsc_devs[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->rsc_dev_ids[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+		rsc_node = to_msm_bus_node(bus_node->node_info->rsc_devs[j]);
+	}
+
 exit_setup_dev_conn:
 	return ret;
 }
@@ -1461,6 +1539,14 @@
 					__func__, pdata->info[i].node_info->id);
 				goto exit_device_probe;
 			}
+		if (pdata->info[i].node_info->is_rsc_dev) {
+			ret = msm_bus_rsc_init(pdev, node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing rsc %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
 	}
 
 	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
@@ -1481,18 +1567,6 @@
 	msm_bus_arb_setops_adhoc(&arb_ops);
 	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
 
-	mbox_apps = rpmh_get_byname(pdev, "apps_rsc");
-	if (IS_ERR_OR_NULL(mbox_apps)) {
-		MSM_BUS_ERR("%s: apps mbox failure", __func__);
-		return PTR_ERR(mbox_apps);
-	}
-
-	mbox_disp = rpmh_get_byname(pdev, "disp_rsc");
-	if (IS_ERR_OR_NULL(mbox_disp)) {
-		MSM_BUS_ERR("%s: disp mbox failure", __func__);
-		return PTR_ERR(mbox_disp);
-	}
-
 	devm_kfree(&pdev->dev, pdata->info);
 	devm_kfree(&pdev->dev, pdata);
 exit_device_probe:
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
index c417ebe..5710bca 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
@@ -90,6 +90,37 @@
 	return NULL;
 }
 
+static struct msm_bus_rsc_device_type *get_rsc_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_rsc_device_type *rsc_dev;
+	int ret;
+
+	rsc_dev = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_rsc_device_type),
+			GFP_KERNEL);
+	if (!rsc_dev) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for rsc_dev\n");
+		goto rsc_dev_err;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,req_state",
+			&rsc_dev->req_state);
+	if (ret) {
+		dev_dbg(&pdev->dev, "req_state missing, using default\n");
+		rsc_dev->req_state = 2;
+	}
+
+	return rsc_dev;
+
+rsc_dev_err:
+	devm_kfree(&pdev->dev, rsc_dev);
+	rsc_dev = 0;
+	return NULL;
+}
+
 static struct msm_bus_bcm_device_type *get_bcm_device_info(
 		struct device_node *dev_node,
 		struct platform_device *pdev)
@@ -113,11 +144,6 @@
 		goto bcm_dev_err;
 	}
 
-	ret = of_property_read_u32(dev_node, "qcom,drv-id",
-			&bcm_dev->drv_id);
-	if (ret)
-		dev_dbg(&pdev->dev, "drv-id is missing\n");
-
 	return bcm_dev;
 
 bcm_dev_err:
@@ -359,6 +385,7 @@
 	struct device_node *con_node;
 	struct device_node *bus_dev;
 	struct device_node *bcm_dev;
+	struct device_node *rsc_dev;
 
 	node_info = devm_kzalloc(&pdev->dev,
 			sizeof(struct msm_bus_node_info_type),
@@ -456,13 +483,34 @@
 					node_info->id);
 			goto node_info_err;
 		}
-		dev_err(&pdev->dev, "found bcm device. Node %d BCM:%d\n",
-				node_info->id, node_info->bcm_dev_ids[0]);
-
 		of_node_put(bcm_dev);
 	}
 
+	if (of_get_property(dev_node, "qcom,rscs", &size)) {
+		node_info->num_rsc_devs = size / sizeof(int);
+		node_info->rsc_dev_ids = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+	} else {
+		node_info->num_rsc_devs = 0;
+		node_info->rsc_devs = 0;
+	}
+
+	for (i = 0; i < node_info->num_rsc_devs; i++) {
+		rsc_dev = of_parse_phandle(dev_node, "qcom,rscs", i);
+		if (IS_ERR_OR_NULL(rsc_dev))
+			goto node_info_err;
+
+		if (of_property_read_u32(rsc_dev, "cell-id",
+				&node_info->rsc_dev_ids[i])) {
+			dev_err(&pdev->dev, "Can't find rsc device. Node %d",
+					node_info->id);
+			goto node_info_err;
+		}
+		of_node_put(rsc_dev);
+	}
+
 	node_info->is_bcm_dev = of_property_read_bool(dev_node, "qcom,bcm-dev");
+	node_info->is_rsc_dev = of_property_read_bool(dev_node, "qcom,rsc-dev");
 	node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
 	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
 
@@ -505,6 +553,18 @@
 		}
 	}
 
+	if (node_device->node_info->is_rsc_dev) {
+
+		node_device->rscdev = get_rsc_device_info(dev_node, pdev);
+
+		if (IS_ERR_OR_NULL(node_device->rscdev)) {
+			dev_err(&pdev->dev,
+				"Error: RSC device info missing\n");
+			devm_kfree(&pdev->dev, node_device->node_info);
+			return -ENODATA;
+		}
+	}
+
 	if (node_device->node_info->is_fab_dev) {
 		struct device_node *qos_clk_node;
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index 3b5eabd..f415735 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -20,6 +20,8 @@
 #include <linux/msm_bus_rules.h>
 #include "msm_bus_core.h"
 
+#define VCD_MAX_CNT 16
+
 struct msm_bus_node_device_type;
 
 struct link_node {
@@ -64,6 +66,12 @@
 	uint32_t vrail_used;
 };
 
+struct msm_bus_rsc_device_type {
+	struct rpmh_client *mbox;
+	struct list_head bcm_clist[VCD_MAX_CNT];
+	int req_state;
+};
+
 struct msm_bus_bcm_device_type {
 	const char *name;
 	uint32_t width;
@@ -128,16 +136,20 @@
 	unsigned int num_connections;
 	unsigned int num_blist;
 	unsigned int num_bcm_devs;
+	unsigned int num_rsc_devs;
 	bool is_fab_dev;
 	bool virt_dev;
 	bool is_bcm_dev;
+	bool is_rsc_dev;
 	bool is_traversed;
 	unsigned int *connections;
 	unsigned int *black_listed_connections;
 	unsigned int *bcm_dev_ids;
+	unsigned int *rsc_dev_ids;
 	struct device **dev_connections;
 	struct device **black_connections;
 	struct device **bcm_devs;
+	struct device **rsc_devs;
 	int bcm_req_idx;
 	unsigned int bus_device_id;
 	struct device *bus_device;
@@ -151,6 +163,7 @@
 	struct msm_bus_node_info_type *node_info;
 	struct msm_bus_fab_device_type *fabdev;
 	struct msm_bus_bcm_device_type *bcmdev;
+	struct msm_bus_rsc_device_type *rscdev;
 	int num_lnodes;
 	struct link_node *lnode_list;
 	struct nodebw node_bw[NUM_CTX];
@@ -164,6 +177,7 @@
 	struct device_node *of_node;
 	struct device dev;
 	bool dirty;
+	bool updated;
 	bool query_dirty;
 	struct list_head dev_link;
 	struct list_head devlist;
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 38d29e4..2471d27 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -502,13 +502,21 @@
 				struct queue_rx_intent_work, work);
 	struct glink_pkt_dev *devp = work_item->devp;
 
-	if (!devp || !devp->handle) {
+	if (!devp) {
+		GLINK_PKT_ERR("%s: Invalid device\n", __func__);
+		kfree(work_item);
+		return;
+	}
+	mutex_lock(&devp->ch_lock);
+	if (!devp->handle) {
 		GLINK_PKT_ERR("%s: Invalid device Handle\n", __func__);
+		mutex_unlock(&devp->ch_lock);
 		kfree(work_item);
 		return;
 	}
 
 	ret = glink_queue_rx_intent(devp->handle, devp, work_item->intent_size);
+	mutex_unlock(&devp->ch_lock);
 	GLINK_PKT_INFO("%s: Triggered with size[%zu] ret[%d]\n",
 				__func__, work_item->intent_size, ret);
 	if (ret)
@@ -664,8 +672,15 @@
 	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
 
 	ret = copy_to_user(buf, pkt->data, pkt->size);
-	if (WARN_ON(ret != 0))
-		return ret;
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_to_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, pkt->size);
+		spin_lock_irqsave(&devp->pkt_list_lock, flags);
+		list_add_tail(&pkt->list, &devp->pkt_list);
+		spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+		return -EFAULT;
+	}
 
 	ret = pkt->size;
 	glink_rx_done(devp->handle, pkt->data, false);
@@ -739,8 +754,13 @@
 	}
 
 	ret = copy_from_user(data, buf, count);
-	if (WARN_ON(ret != 0))
-		return ret;
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_from_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, count);
+		kfree(data);
+		return -EFAULT;
+	}
 
 	ret = glink_tx(devp->handle, data, data, count, GLINK_TX_REQ_INTENT);
 	if (ret) {
@@ -1038,6 +1058,27 @@
 }
 
 /**
+ * pop_rx_pkt() - return first pkt from rx pkt_list
+ * devp:	pointer to G-Link packet device.
+ *
+ * Return the first packet from the rx pkt_list, or NULL if the list is empty.
+ */
+struct glink_rx_pkt *pop_rx_pkt(struct glink_pkt_dev *devp)
+{
+	unsigned long flags;
+	struct glink_rx_pkt *pkt = NULL;
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	if (!list_empty(&devp->pkt_list)) {
+		pkt = list_first_entry(&devp->pkt_list,
+				struct glink_rx_pkt, list);
+		list_del(&pkt->list);
+	}
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+	return pkt;
+}
+
+/**
  * glink_pkt_release() - release operation on glink_pkt device
  * inode:	Pointer to the inode structure.
  * file:	Pointer to the file structure.
@@ -1051,6 +1092,7 @@
 	int ret = 0;
 	struct glink_pkt_dev *devp = file->private_data;
 	unsigned long flags;
+	struct glink_rx_pkt *pkt;
 
 	GLINK_PKT_INFO("%s() on dev id:%d by [%s] ref_cnt[%d]\n",
 			__func__, devp->i, current->comm, devp->ref_cnt);
@@ -1059,9 +1101,14 @@
 		devp->ref_cnt--;
 
 	if (devp->handle && devp->ref_cnt == 0) {
+		while ((pkt = pop_rx_pkt(devp))) {
+			glink_rx_done(devp->handle, pkt->data, false);
+			kfree(pkt);
+		}
 		wake_up(&devp->ch_read_wait_queue);
 		wake_up_interruptible(&devp->ch_opened_wait_queue);
 		ret = glink_close(devp->handle);
+		devp->handle = NULL;
 		if (ret)  {
 			GLINK_PKT_ERR("%s: close failed ret[%d]\n",
 						__func__, ret);
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index c2fb37b..1bbd751 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -79,6 +79,7 @@
 static void *smem_ramdump_dev;
 static DEFINE_MUTEX(spinlock_init_lock);
 static DEFINE_SPINLOCK(smem_init_check_lock);
+static struct device *smem_dev;
 static int smem_module_inited;
 static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
 static DEFINE_MUTEX(smem_module_init_notifier_lock);
@@ -374,7 +375,7 @@
 	uint32_t a_hdr_size;
 	int rc;
 
-	SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
+	SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
 					flags, skip_init_check, use_rspinlock);
 
 	if (!skip_init_check && !smem_initialized_check())
@@ -817,7 +818,7 @@
 void *smem_get_entry(unsigned int id, unsigned int *size, unsigned int to_proc,
 							unsigned int flags)
 {
-	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);
+	SMEM_DBG("%s(%u, %u, %u)\n", __func__, id, to_proc, flags);
 
 	/*
 	 * Handle the circular dependency between SMEM and software implemented
@@ -1084,7 +1085,8 @@
 	void *handle;
 	struct restart_notifier_block *nb;
 
-	smem_ramdump_dev = create_ramdump_device("smem", NULL);
+	if (smem_dev)
+		smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
 	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
 		LOG_ERR("%s: Unable to create smem ramdump device.\n",
 			__func__);
@@ -1509,7 +1511,7 @@
 		SMEM_INFO("smem security enabled\n");
 		smem_init_security();
 	}
-
+	smem_dev = &pdev->dev;
 	probe_done = true;
 
 	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 9c3f9431..0b952a4 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -101,6 +101,7 @@
 		.ei_array	= NULL,
 	},
 };
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
 
 struct elem_info qmi_error_resp_type_v01_ei[] = {
 	{
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 10caf22..b40d678 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -266,10 +266,12 @@
 		if (!domains_read) {
 			db_rev_count = pd->db_rev_count = resp->db_rev_count;
 			pd->total_domains = resp->total_domains;
-			if (!pd->total_domains && resp->domain_list_len) {
-				pr_err("total domains not set\n");
-				pd->total_domains = resp->domain_list_len;
+			if (!resp->total_domains) {
+				pr_err("No matching domains found\n");
+				rc = -EIO;
+				goto out;
 			}
+
 			pd->domain_list = kmalloc(
 					sizeof(struct servreg_loc_entry_v01) *
 					resp->total_domains, GFP_KERNEL);
@@ -286,6 +288,10 @@
 			rc = -EAGAIN;
 			goto out;
 		}
+		if (resp->domain_list_len > resp->total_domains) {
+			/* Always read total_domains from the response msg */
+			resp->domain_list_len = resp->total_domains;
+		}
 		/* Copy the response*/
 		store_get_domain_list_response(pd, resp, domains_read);
 		domains_read += resp->domain_list_len;
@@ -372,6 +378,7 @@
 	if (!pqw) {
 		rc = -ENOMEM;
 		pr_err("Allocation failed\n");
+		kfree(pqcd);
 		goto err;
 	}
 	pqw->notifier = locator_nb;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9125d93..ef1c8c1 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@
 
 	buf = kzalloc(12, GFP_KERNEL);
 	if (!buf)
-		return;
+		goto out_free;
 
 	memset(cdb, 0, MAX_COMMAND_SIZE);
 	cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@
 	 * If MODE_SENSE still returns zero, set the default value to 1024.
 	 */
 	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
 	if (!sdev->sector_size)
 		sdev->sector_size = 1024;
-out_free:
+
 	kfree(buf);
 }
 
@@ -314,9 +315,10 @@
 				sd->lun, sd->queue_depth);
 	}
 
-	dev->dev_attrib.hw_block_size = sd->sector_size;
+	dev->dev_attrib.hw_block_size =
+		min_not_zero((int)sd->sector_size, 512);
 	dev->dev_attrib.hw_max_sectors =
-		min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+		min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
 	dev->dev_attrib.hw_queue_depth = sd->queue_depth;
 
 	/*
@@ -339,8 +341,10 @@
 	/*
 	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
 	 */
-	if (sd->type == TYPE_TAPE)
+	if (sd->type == TYPE_TAPE) {
 		pscsi_tape_read_blocksize(dev, sd);
+		dev->dev_attrib.hw_block_size = sd->sector_size;
+	}
 	return 0;
 }
 
@@ -406,7 +410,7 @@
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
 	__releases(sh->host_lock)
 {
 	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@
 	return 0;
 }
 
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
-		struct scsi_device *sd)
-	__releases(sh->host_lock)
-{
-	struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
-	struct Scsi_Host *sh = sd->host;
-	int ret;
-
-	spin_unlock_irq(sh->host_lock);
-	ret = pscsi_add_device_to_list(dev, sd);
-	if (ret)
-		return ret;
-
-	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
-		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
-		sd->channel, sd->id, sd->lun);
-	return 0;
-}
-
 static int pscsi_configure_device(struct se_device *dev)
 {
 	struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@
 		case TYPE_DISK:
 			ret = pscsi_create_type_disk(dev, sd);
 			break;
-		case TYPE_ROM:
-			ret = pscsi_create_type_rom(dev, sd);
-			break;
 		default:
-			ret = pscsi_create_type_other(dev, sd);
+			ret = pscsi_create_type_nondisk(dev, sd);
 			break;
 		}
 
@@ -611,8 +590,7 @@
 		else if (pdv->pdv_lld_host)
 			scsi_host_put(pdv->pdv_lld_host);
 
-		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
-			scsi_device_put(sd);
+		scsi_device_put(sd);
 
 		pdv->pdv_sd = NULL;
 	}
@@ -1069,7 +1047,6 @@
 	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
 		return pdv->pdv_bd->bd_part->nr_sects;
 
-	dump_stack();
 	return 0;
 }
 
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index aabd660..a53fb23 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1104,9 +1104,15 @@
 			return ret;
 		break;
 	case VERIFY:
+	case VERIFY_16:
 		size = 0;
-		sectors = transport_get_sectors_10(cdb);
-		cmd->t_task_lba = transport_lba_32(cdb);
+		if (cdb[0] == VERIFY) {
+			sectors = transport_get_sectors_10(cdb);
+			cmd->t_task_lba = transport_lba_32(cdb);
+		} else {
+			sectors = transport_get_sectors_16(cdb);
+			cmd->t_task_lba = transport_lba_64(cdb);
+		}
 		cmd->execute_cmd = sbc_emulate_noop;
 		goto check_lba;
 	case REZERO_UNIT:
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 2fda339..2a4000d 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -454,6 +454,16 @@
 	  the sensor. Also able to set threshold temperature for both hot and cold
 	  and update when a threshold is reached.
 
+config THERMAL_TSENS
+	tristate "Qualcomm Technologies Inc. TSENS Temperature driver"
+	depends on THERMAL
+	help
+	  This enables the thermal sysfs driver for the TSENS device. It shows
+	  up in sysfs as a thermal zone with multiple trip points. It can also
+	  set warm and cool threshold temperatures and notify the thermal
+	  userspace client when a threshold is crossed. Warm/cool temperature
+	  thresholds can be set independently for each sensor.
+
 menu "Qualcomm thermal drivers"
 depends on (ARCH_QCOM && OF) || COMPILE_TEST
 source "drivers/thermal/qcom/Kconfig"
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d9489a7..e12c199 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -57,3 +57,4 @@
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
 obj-$(CONFIG_GENERIC_ADC_THERMAL)	+= thermal-generic-adc.o
 obj-$(CONFIG_THERMAL_QPNP_ADC_TM)	+= qpnp-adc-tm.o
+obj-$(CONFIG_THERMAL_TSENS)	+= msm-tsens.o tsens2xxx.o tsens-dbg.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 9ce0e9e..5556b5b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -30,6 +30,8 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
+#include <linux/sched.h>
+#include <linux/of_device.h>
 
 #include <trace/events/thermal.h>
 
@@ -45,6 +47,7 @@
  *	level 0 --> 1st Max Freq
  *	level 1 --> 2nd Max Freq
  *	...
+ *	level n --> core isolated
  */
 
 /**
@@ -70,8 +73,8 @@
  *	cooling	devices.
  * @clipped_freq: integer value representing the absolute value of the clipped
  *	frequency.
- * @max_level: maximum cooling level. One less than total number of valid
- *	cpufreq frequencies.
+ * @max_level: maximum cooling level. Levels 0..max_level-1 select a clipped
+ *	frequency; level max_level isolates the core from scheduling.
  * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
  * @node: list_head to link all cpufreq_cooling_device together.
  * @last_load: load measured by the latest call to cpufreq_get_requested_power()
@@ -103,6 +106,7 @@
 	int dyn_power_table_entries;
 	struct device *cpu_dev;
 	get_static_t plat_get_static_power;
+	struct cpu_cooling_ops *plat_ops;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
@@ -162,7 +166,7 @@
 {
 	unsigned long level;
 
-	for (level = 0; level <= cpufreq_dev->max_level; level++) {
+	for (level = 0; level < cpufreq_dev->max_level; level++) {
 		if (freq == cpufreq_dev->freq_table[level])
 			return level;
 
@@ -535,11 +539,27 @@
 	if (cpufreq_device->cpufreq_state == state)
 		return 0;
 
+	/* If state is the last, isolate the CPU */
+	if (state == cpufreq_device->max_level)
+		return sched_isolate_cpu(cpu);
+	else if (state < cpufreq_device->max_level)
+		sched_unisolate_cpu(cpu);
+
 	clip_freq = cpufreq_device->freq_table[state];
 	cpufreq_device->cpufreq_state = state;
 	cpufreq_device->clipped_freq = clip_freq;
 
-	cpufreq_update_policy(cpu);
+	/* Check if the device has a platform mitigation function that
+	 * can handle the CPU freq mitigation, if not, notify cpufreq
+	 * framework.
+	 */
+	if (cpufreq_device->plat_ops) {
+		if (cpufreq_device->plat_ops->ceil_limit)
+			cpufreq_device->plat_ops->ceil_limit(cpu,
+						clip_freq);
+	} else {
+		cpufreq_update_policy(cpu);
+	}
 
 	return 0;
 }
@@ -783,6 +803,9 @@
  * @capacitance: dynamic power coefficient for these cpus
  * @plat_static_func: function to calculate the static power consumed by these
  *                    cpus (optional)
+ * @plat_ops: platform mitigation ops that apply the frequency limit
+ *            (optional). By default, the cpufreq framework will be
+ *            notified of the new limits.
  *
  * This interface function registers the cpufreq cooling device with the name
  * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -795,7 +818,8 @@
 static struct thermal_cooling_device *
 __cpufreq_cooling_register(struct device_node *np,
 			const struct cpumask *clip_cpus, u32 capacitance,
-			get_static_t plat_static_func)
+			get_static_t plat_static_func,
+			struct cpu_cooling_ops *plat_ops)
 {
 	struct cpufreq_policy *policy;
 	struct thermal_cooling_device *cool_dev;
@@ -848,7 +872,9 @@
 	cpufreq_for_each_valid_entry(pos, table)
 		cpufreq_dev->max_level++;
 
-	cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+	/* Last level will indicate the core will be isolated. */
+	cpufreq_dev->max_level++;
+	cpufreq_dev->freq_table = kzalloc(sizeof(*cpufreq_dev->freq_table) *
 					  cpufreq_dev->max_level, GFP_KERNEL);
 	if (!cpufreq_dev->freq_table) {
 		cool_dev = ERR_PTR(-ENOMEM);
@@ -874,6 +900,8 @@
 		cooling_ops = &cpufreq_cooling_ops;
 	}
 
+	cpufreq_dev->plat_ops = plat_ops;
+
 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
 	if (ret) {
 		cool_dev = ERR_PTR(ret);
@@ -881,7 +909,7 @@
 	}
 
 	/* Fill freq-table in descending order of frequencies */
-	for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+	for (i = 0, freq = -1; i < cpufreq_dev->max_level; i++) {
 		freq = find_next_max(table, freq);
 		cpufreq_dev->freq_table[i] = freq;
 
@@ -949,7 +977,7 @@
 struct thermal_cooling_device *
 cpufreq_cooling_register(const struct cpumask *clip_cpus)
 {
-	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
+	return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
@@ -973,7 +1001,7 @@
 	if (!np)
 		return ERR_PTR(-EINVAL);
 
-	return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
+	return __cpufreq_cooling_register(np, clip_cpus, 0, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
 
@@ -1003,11 +1031,34 @@
 			       get_static_t plat_static_func)
 {
 	return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
-				plat_static_func);
+				plat_static_func, NULL);
 }
 EXPORT_SYMBOL(cpufreq_power_cooling_register);
 
 /**
+ * cpufreq_platform_cooling_register() - create cpufreq cooling device with
+ * additional platform specific mitigation function.
+ *
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @plat_ops: the platform mitigation functions that will be called instead of
+ * cpufreq, if provided.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+				struct cpu_cooling_ops *plat_ops)
+{
+	struct device_node *cpu_node;
+
+	cpu_node = of_cpu_device_node_get(cpumask_first(clip_cpus));
+	return __cpufreq_cooling_register(cpu_node, clip_cpus, 0, NULL,
+						plat_ops);
+}
+EXPORT_SYMBOL(cpufreq_platform_cooling_register);
+
+/**
  * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
  * @np:	a valid struct device_node to the cooling device device tree node
  * @clip_cpus:	cpumask of cpus where the frequency constraints will happen
@@ -1040,7 +1091,7 @@
 		return ERR_PTR(-EINVAL);
 
 	return __cpufreq_cooling_register(np, clip_cpus, capacitance,
-				plat_static_func);
+				plat_static_func, NULL);
 }
 EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
 
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
new file mode 100644
index 0000000..5b4bb7a
--- /dev/null
+++ b/drivers/thermal/msm-tsens.c
@@ -0,0 +1,288 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+LIST_HEAD(tsens_device_list);
+
+static int tsens_get_temp(struct tsens_sensor *s, int *temp)
+{
+	struct tsens_device *tmdev = s->tmdev;
+
+	return tmdev->ops->get_temp(s, temp);
+}
+
+static int tsens_set_trip_temp(struct tsens_sensor *s, int trip, int temp)
+{
+	struct tsens_device *tmdev = s->tmdev;
+
+	if (tmdev->ops->set_trip_temp)
+		return tmdev->ops->set_trip_temp(s, trip, temp);
+
+	return 0;
+}
+
+static int tsens_init(struct tsens_device *tmdev)
+{
+	if (tmdev->ops->hw_init)
+		return tmdev->ops->hw_init(tmdev);
+
+	return 0;
+}
+
+static int tsens_register_interrupts(struct tsens_device *tmdev)
+{
+	if (tmdev->ops->interrupts_reg)
+		return tmdev->ops->interrupts_reg(tmdev);
+
+	return 0;
+}
+
+static const struct of_device_id tsens_table[] = {
+	{	.compatible = "qcom,msm8996-tsens",
+		.data = &data_tsens2xxx,
+	},
+	{	.compatible = "qcom,msm8953-tsens",
+		.data = &data_tsens2xxx,
+	},
+	{	.compatible = "qcom,msm8998-tsens",
+		.data = &data_tsens2xxx,
+	},
+	{	.compatible = "qcom,msmhamster-tsens",
+		.data = &data_tsens2xxx,
+	},
+	{	.compatible = "qcom,sdm660-tsens",
+		.data = &data_tsens23xx,
+	},
+	{	.compatible = "qcom,sdm630-tsens",
+		.data = &data_tsens23xx,
+	},
+	{	.compatible = "qcom,sdm845-tsens",
+		.data = &data_tsens24xx,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, tsens_table);
+
+static struct thermal_zone_of_device_ops tsens_tm_thermal_zone_ops = {
+	.get_temp = tsens_get_temp,
+	.set_trip_temp = tsens_set_trip_temp,
+};
+
+static int get_device_tree_data(struct platform_device *pdev,
+				struct tsens_device *tmdev)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	u32 *hw_id, *client_id;
+	u32 rc = 0, i, tsens_num_sensors = tmdev->num_sensors;
+	int tsens_len;
+	const struct of_device_id *id;
+	const struct tsens_data *data;
+	struct resource *res_tsens_mem, *res_mem = NULL;
+
+	if (!of_match_node(tsens_table, of_node)) {
+		pr_err("Need to read SoC specific fuse map\n");
+		return -ENODEV;
+	}
+
+	id = of_match_node(tsens_table, of_node);
+	if (id == NULL) {
+		pr_err("can not find tsens_table of_node\n");
+		return -ENODEV;
+		pr_err("cannot find tsens_table of_node\n");
+
+	data = id->data;
+	hw_id = devm_kzalloc(&pdev->dev,
+		tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+	if (!hw_id)
+		return -ENOMEM;
+
+	client_id = devm_kzalloc(&pdev->dev,
+		tsens_num_sensors * sizeof(u32), GFP_KERNEL);
+	if (!client_id)
+		return -ENOMEM;
+
+	tmdev->ops = data->ops;
+	tmdev->ctrl_data = data;
+	tmdev->pdev = pdev;
+
+	if (!tmdev->ops || !tmdev->ops->hw_init || !tmdev->ops->get_temp) {
+		pr_err("Invalid ops\n");
+		return -EINVAL;
+	}
+
+	/* TSENS register region */
+	res_tsens_mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "tsens_physical");
+	if (!res_tsens_mem) {
+		pr_err("Could not get tsens physical address resource\n");
+		return -EINVAL;
+	}
+
+	tsens_len = res_tsens_mem->end - res_tsens_mem->start + 1;
+
+	res_mem = request_mem_region(res_tsens_mem->start,
+				tsens_len, res_tsens_mem->name);
+	if (!res_mem) {
+		pr_err("Request tsens physical memory region failed\n");
+		return -EINVAL;
+	}
+
+	tmdev->tsens_addr = ioremap(res_mem->start, tsens_len);
+	if (!tmdev->tsens_addr) {
+		pr_err("Failed to IO map TSENS registers.\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node,
+		"qcom,sensor-id", hw_id, tsens_num_sensors);
+	if (rc) {
+		pr_debug("Default sensor id mapping\n");
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].hw_id = i;
+	} else {
+		pr_debug("Use specified sensor id mapping\n");
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].hw_id = hw_id[i];
+	}
+
+	rc = of_property_read_u32_array(of_node,
+		"qcom,client-id", client_id, tsens_num_sensors);
+	if (rc) {
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].id = i;
+		pr_debug("Default client id mapping\n");
+	} else {
+		for (i = 0; i < tsens_num_sensors; i++)
+			tmdev->sensor[i].id = client_id[i];
+		pr_debug("Use specified client id mapping\n");
+	}
+
+	return 0;
+}
+
+static int tsens_thermal_zone_register(struct tsens_device *tmdev)
+{
+	int rc = 0, i = 0;
+
+	for (i = 0; i < tmdev->num_sensors; i++) {
+		tmdev->sensor[i].tmdev = tmdev;
+		tmdev->sensor[i].tzd = devm_thermal_zone_of_sensor_register(
+					&tmdev->pdev->dev, i, &tmdev->sensor[i],
+					&tsens_tm_thermal_zone_ops);
+		if (IS_ERR(tmdev->sensor[i].tzd)) {
+			pr_err("Error registering sensor:%d\n", i);
+			continue;
+		}
+	}
+
+	return rc;
+}
+
+static int tsens_tm_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+int tsens_tm_probe(struct platform_device *pdev)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct tsens_device *tmdev = NULL;
+	u32 tsens_num_sensors = 0;
+	int rc;
+
+	if (!(pdev->dev.of_node))
+		return -ENODEV;
+
+	rc = of_property_read_u32(of_node,
+			"qcom,sensors", &tsens_num_sensors);
+	if (rc || (!tsens_num_sensors)) {
+		dev_err(&pdev->dev, "missing sensors\n");
+		return -ENODEV;
+	}
+
+	tmdev = devm_kzalloc(&pdev->dev,
+			sizeof(struct tsens_device) +
+			tsens_num_sensors *
+			sizeof(struct tsens_sensor),
+			GFP_KERNEL);
+	if (tmdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	tmdev->num_sensors = tsens_num_sensors;
+
+	rc = get_device_tree_data(pdev, tmdev);
+	if (rc) {
+		pr_err("Error reading TSENS DT\n");
+		return rc;
+	}
+
+	rc = tsens_init(tmdev);
+	if (rc)
+		return rc;
+
+	rc = tsens_thermal_zone_register(tmdev);
+	if (rc) {
+		pr_err("Error registering the thermal zone\n");
+		return rc;
+	}
+
+	rc = tsens_register_interrupts(tmdev);
+	if (rc < 0) {
+		pr_err("TSENS interrupt register failed:%d\n", rc);
+		return rc;
+	}
+
+	list_add_tail(&tmdev->list, &tsens_device_list);
+	platform_set_drvdata(pdev, tmdev);
+
+	return rc;
+}
+
+static struct platform_driver tsens_tm_driver = {
+	.probe = tsens_tm_probe,
+	.remove = tsens_tm_remove,
+	.driver = {
+		.name = "msm-tsens",
+		.owner = THIS_MODULE,
+		.of_match_table = tsens_table,
+	},
+};
+
+int __init tsens_tm_init_driver(void)
+{
+	return platform_driver_register(&tsens_tm_driver);
+}
+subsys_initcall(tsens_tm_init_driver);
+
+static void __exit tsens_tm_deinit(void)
+{
+	platform_driver_unregister(&tsens_tm_driver);
+}
+module_exit(tsens_tm_deinit);
+
+MODULE_ALIAS("platform:" TSENS_DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index d04ec3b..b337ad7 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -31,11 +31,16 @@
 #include <linux/export.h>
 #include <linux/string.h>
 #include <linux/thermal.h>
+#include <linux/list.h>
 
 #include "thermal_core.h"
 
-/***   Private data structures to represent thermal device tree data ***/
+#define for_each_tz_sibling(pos, head)                                         \
+	for (pos = list_first_entry((head), struct __thermal_zone, list);\
+		&(pos->list) != (head);                                  \
+		pos = list_next_entry(pos, list))                        \
 
+/***   Private data structures to represent thermal device tree data ***/
 /**
  * struct __thermal_bind_param - a match between trip and cooling device
  * @cooling_device: a pointer to identify the referred cooling device
@@ -54,18 +59,36 @@
 };
 
 /**
+ * struct __sensor_param - Holds individual sensor data
+ * @sensor_data: sensor driver private data passed as input argument
+ * @ops: sensor driver ops
+ * @trip_high: last trip high value programmed in the sensor driver
+ * @trip_low: last trip low value programmed in the sensor driver
+ * @lock: mutex lock acquired before updating the trip temperatures
+ * @first_tz: list head of the thermal zones referencing this sensor
+ */
+struct __sensor_param {
+	void *sensor_data;
+	const struct thermal_zone_of_device_ops *ops;
+	int trip_high, trip_low;
+	struct mutex lock;
+	struct list_head first_tz;
+};
+
+/**
  * struct __thermal_zone - internal representation of a thermal zone
  * @mode: current thermal zone device mode (enabled/disabled)
  * @passive_delay: polling interval while passive cooling is activated
  * @polling_delay: zone polling interval
  * @slope: slope of the temperature adjustment curve
  * @offset: offset of the temperature adjustment curve
+ * @tzd: thermal zone device pointer for this sensor
  * @ntrips: number of trip points
  * @trips: an array of trip points (0..ntrips - 1)
  * @num_tbps: number of thermal bind params
  * @tbps: an array of thermal bind params (0..num_tbps - 1)
- * @sensor_data: sensor private data used while reading temperature and trend
- * @ops: set of callbacks to handle the thermal zone based on DT
+ * @list: sibling thermal zone pointer
+ * @senps: sensor related parameters
  */
 
 struct __thermal_zone {
@@ -74,6 +97,7 @@
 	int polling_delay;
 	int slope;
 	int offset;
+	struct thermal_zone_device *tzd;
 
 	/* trip data */
 	int ntrips;
@@ -83,11 +107,14 @@
 	int num_tbps;
 	struct __thermal_bind_params *tbps;
 
+	struct list_head list;
 	/* sensor interface */
-	void *sensor_data;
-	const struct thermal_zone_of_device_ops *ops;
+	struct __sensor_param *senps;
 };
 
+static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
+		unsigned int trip_type_mask, int *low, int *high);
+
 /***   DT thermal zone device callbacks   ***/
 
 static int of_thermal_get_temp(struct thermal_zone_device *tz,
@@ -95,21 +122,36 @@
 {
 	struct __thermal_zone *data = tz->devdata;
 
-	if (!data->ops->get_temp)
+	if (!data->senps || !data->senps->ops->get_temp)
 		return -EINVAL;
 
-	return data->ops->get_temp(data->sensor_data, temp);
+	return data->senps->ops->get_temp(data->senps->sensor_data, temp);
 }
 
 static int of_thermal_set_trips(struct thermal_zone_device *tz,
-				int low, int high)
+				int inp_low, int inp_high)
 {
 	struct __thermal_zone *data = tz->devdata;
+	int high = INT_MAX, low = INT_MIN, ret = 0;
 
-	if (!data->ops || !data->ops->set_trips)
+	if (!data->senps || !data->senps->ops->set_trips)
 		return -EINVAL;
 
-	return data->ops->set_trips(data->sensor_data, low, high);
+	mutex_lock(&data->senps->lock);
+	of_thermal_aggregate_trip_types(tz, GENMASK(THERMAL_TRIP_CRITICAL, 0),
+					&low, &high);
+	if (low == data->senps->trip_low
+		&& high == data->senps->trip_high)
+		goto set_trips_exit;
+
+	data->senps->trip_low = low;
+	data->senps->trip_high = high;
+	ret = data->senps->ops->set_trips(data->senps->sensor_data,
+					  low, high);
+
+set_trips_exit:
+	mutex_unlock(&data->senps->lock);
+	return ret;
 }
 
 /**
@@ -192,7 +234,10 @@
 {
 	struct __thermal_zone *data = tz->devdata;
 
-	return data->ops->set_emul_temp(data->sensor_data, temp);
+	if (!data->senps || !data->senps->ops->set_emul_temp)
+		return -EINVAL;
+
+	return data->senps->ops->set_emul_temp(data->senps->sensor_data, temp);
 }
 
 static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
@@ -200,10 +245,11 @@
 {
 	struct __thermal_zone *data = tz->devdata;
 
-	if (!data->ops->get_trend)
+	if (!data->senps || !data->senps->ops->get_trend)
 		return -EINVAL;
 
-	return data->ops->get_trend(data->sensor_data, trip, trend);
+	return data->senps->ops->get_trend(data->senps->sensor_data,
+					   trip, trend);
 }
 
 static int of_thermal_bind(struct thermal_zone_device *thermal,
@@ -325,10 +371,11 @@
 	if (trip >= data->ntrips || trip < 0)
 		return -EDOM;
 
-	if (data->ops->set_trip_temp) {
+	if (data->senps && data->senps->ops->set_trip_temp) {
 		int ret;
 
-		ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
+		ret = data->senps->ops->set_trip_temp(data->senps->sensor_data,
+						      trip, temp);
 		if (ret)
 			return ret;
 	}
@@ -381,6 +428,89 @@
 	return -EINVAL;
 }
 
+static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
+		unsigned int trip_type_mask, int *low, int *high)
+{
+	int min = INT_MIN;
+	int max = INT_MAX;
+	int tt, th, trip;
+	int temp = tz->temperature;
+	struct thermal_zone_device *zone = NULL;
+	struct __thermal_zone *data = tz->devdata;
+	struct list_head *head;
+	enum thermal_trip_type type = 0;
+
+	head = &data->senps->first_tz;
+	for_each_tz_sibling(data, head) {
+		zone = data->tzd;
+		for (trip = 0; trip < data->ntrips; trip++) {
+			of_thermal_get_trip_type(zone, trip, &type);
+			if (!(BIT(type) & trip_type_mask))
+				continue;
+
+			if (!zone->tzp->tracks_low) {
+				tt = data->trips[trip].temperature;
+				if (tt > temp && tt < max)
+					max = tt;
+				th = tt - data->trips[trip].hysteresis;
+				if (th < temp && th > min)
+					min = th;
+			} else {
+				tt = data->trips[trip].temperature;
+				if (tt < temp && tt > min)
+					min = tt;
+				th = tt + data->trips[trip].hysteresis;
+				if (th > temp && th < max)
+					max = th;
+			}
+		}
+	}
+
+	*high = max;
+	*low = min;
+
+	return 0;
+}
+
+/*
+ * of_thermal_aggregate_trip - aggregate trip temperatures across sibling
+ *				thermal zones.
+ * @tz: pointer to the primary thermal zone.
+ * @type: the thermal trip type to be aggregated upon
+ * @low: the highest of the thresholds that lie below the zone temperature
+ * @high: the lowest of the thresholds that lie above the zone temperature
+ */
+int of_thermal_aggregate_trip(struct thermal_zone_device *tz,
+				enum thermal_trip_type type,
+				int *low, int *high)
+{
+	if (type <= THERMAL_TRIP_CRITICAL)
+		return of_thermal_aggregate_trip_types(tz, BIT(type), low,
+						       high);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(of_thermal_aggregate_trip);
+
+/*
+ * of_thermal_handle_trip - Handle thermal trip from sensors
+ *
+ * @tz: pointer to the primary thermal zone.
+ */
+void of_thermal_handle_trip(struct thermal_zone_device *tz)
+{
+	struct thermal_zone_device *zone;
+	struct __thermal_zone *data = tz->devdata;
+	struct list_head *head;
+
+	head = &data->senps->first_tz;
+	for_each_tz_sibling(data, head) {
+		zone = data->tzd;
+		thermal_zone_device_update(zone, THERMAL_EVENT_UNSPECIFIED);
+	}
+}
+EXPORT_SYMBOL(of_thermal_handle_trip);
+
 static struct thermal_zone_device_ops of_thermal_ops = {
 	.get_mode = of_thermal_get_mode,
 	.set_mode = of_thermal_set_mode,
@@ -400,8 +530,8 @@
 
 static struct thermal_zone_device *
 thermal_zone_of_add_sensor(struct device_node *zone,
-			   struct device_node *sensor, void *data,
-			   const struct thermal_zone_of_device_ops *ops)
+			   struct device_node *sensor,
+			   struct __sensor_param *sens_param)
 {
 	struct thermal_zone_device *tzd;
 	struct __thermal_zone *tz;
@@ -412,12 +542,11 @@
 
 	tz = tzd->devdata;
 
-	if (!ops)
+	if (!sens_param->ops)
 		return ERR_PTR(-EINVAL);
 
 	mutex_lock(&tzd->lock);
-	tz->ops = ops;
-	tz->sensor_data = data;
+	tz->senps = sens_param;
 
 	tzd->ops->get_temp = of_thermal_get_temp;
 	tzd->ops->get_trend = of_thermal_get_trend;
@@ -426,12 +555,13 @@
 	 * The thermal zone core will calculate the window if they have set the
 	 * optional set_trips pointer.
 	 */
-	if (ops->set_trips)
+	if (sens_param->ops->set_trips)
 		tzd->ops->set_trips = of_thermal_set_trips;
 
-	if (ops->set_emul_temp)
+	if (sens_param->ops->set_emul_temp)
 		tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
 
+	list_add_tail(&tz->list, &sens_param->first_tz);
 	mutex_unlock(&tzd->lock);
 
 	return tzd;
@@ -462,11 +592,10 @@
  * 01 - This function must enqueue the new sensor instead of using
  * it as the only source of temperature values.
  *
- * 02 - There must be a way to match the sensor with all thermal zones
- * that refer to it.
- *
  * Return: On success returns a valid struct thermal_zone_device,
- * otherwise, it returns a corresponding ERR_PTR(). Caller must
+ * otherwise, it returns a corresponding ERR_PTR(). In case there are multiple
+ * thermal zones referencing the same sensor, the return value will be the
+ * thermal_zone_device pointer of the first thermal zone. Caller must
  * check the return value with help of IS_ERR() helper.
  */
 struct thermal_zone_device *
@@ -475,6 +604,8 @@
 {
 	struct device_node *np, *child, *sensor_np;
 	struct thermal_zone_device *tzd = ERR_PTR(-ENODEV);
+	struct thermal_zone_device *first_tzd = NULL;
+	struct __sensor_param *sens_param = NULL;
 
 	np = of_find_node_by_name(NULL, "thermal-zones");
 	if (!np)
@@ -485,6 +616,17 @@
 		return ERR_PTR(-EINVAL);
 	}
 
+	sens_param = kzalloc(sizeof(*sens_param), GFP_KERNEL);
+	if (!sens_param) {
+		of_node_put(np);
+		return ERR_PTR(-ENOMEM);
+	}
+	sens_param->sensor_data = data;
+	sens_param->ops = ops;
+	INIT_LIST_HEAD(&sens_param->first_tz);
+	sens_param->trip_high = INT_MAX;
+	sens_param->trip_low = INT_MIN;
+	mutex_init(&sens_param->lock);
 	sensor_np = of_node_get(dev->of_node);
 
 	for_each_available_child_of_node(np, child) {
@@ -509,21 +651,23 @@
 
 		if (sensor_specs.np == sensor_np && id == sensor_id) {
 			tzd = thermal_zone_of_add_sensor(child, sensor_np,
-							 data, ops);
-			if (!IS_ERR(tzd))
+							 sens_param);
+			if (!IS_ERR(tzd)) {
+				if (!first_tzd)
+					first_tzd = tzd;
 				tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
-
-			of_node_put(sensor_specs.np);
-			of_node_put(child);
-			goto exit;
+			}
 		}
 		of_node_put(sensor_specs.np);
 	}
-exit:
 	of_node_put(sensor_np);
 	of_node_put(np);
 
-	return tzd;
+	if (!first_tzd) {
+		first_tzd = ERR_PTR(-ENODEV);
+		kfree(sens_param);
+	}
+	return first_tzd;
 }
 EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register);
 
@@ -546,6 +690,8 @@
 				       struct thermal_zone_device *tzd)
 {
 	struct __thermal_zone *tz;
+	struct thermal_zone_device *pos;
+	struct list_head *head;
 
 	if (!dev || !tzd || !tzd->devdata)
 		return;
@@ -556,14 +702,20 @@
 	if (!tz)
 		return;
 
-	mutex_lock(&tzd->lock);
-	tzd->ops->get_temp = NULL;
-	tzd->ops->get_trend = NULL;
-	tzd->ops->set_emul_temp = NULL;
+	head = &tz->senps->first_tz;
+	for_each_tz_sibling(tz, head) {
+		pos = tz->tzd;
+		mutex_lock(&pos->lock);
+		pos->ops->get_temp = NULL;
+		pos->ops->get_trend = NULL;
+		pos->ops->set_emul_temp = NULL;
 
-	tz->ops = NULL;
-	tz->sensor_data = NULL;
-	mutex_unlock(&tzd->lock);
+		list_del(&tz->list);
+		if (list_empty(&tz->senps->first_tz))
+			kfree(tz->senps);
+		tz->senps = NULL;
+		mutex_unlock(&pos->lock);
+	}
 }
 EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
 
@@ -832,6 +984,7 @@
 	if (!tz)
 		return ERR_PTR(-ENOMEM);
 
+	INIT_LIST_HEAD(&tz->list);
 	ret = of_property_read_u32(np, "polling-delay-passive", &prop);
 	if (ret < 0) {
 		pr_err("missing polling-delay-passive property\n");
@@ -975,6 +1128,7 @@
 		struct thermal_zone_params *tzp;
 		int i, mask = 0;
 		u32 prop;
+		const char *governor_name;
 
 		tz = thermal_of_build_thermal_zone(child);
 		if (IS_ERR(tz)) {
@@ -997,6 +1151,11 @@
 		/* No hwmon because there might be hwmon drivers registering */
 		tzp->no_hwmon = true;
 
+		if (!of_property_read_string(child, "thermal-governor",
+						&governor_name))
+			strlcpy(tzp->governor_name, governor_name,
+					THERMAL_NAME_LENGTH);
+
 		if (!of_property_read_u32(child, "sustainable-power", &prop))
 			tzp->sustainable_power = prop;
 
@@ -1007,6 +1166,9 @@
 		tzp->slope = tz->slope;
 		tzp->offset = tz->offset;
 
+		if (of_property_read_bool(child, "tracks-low"))
+			tzp->tracks_low = true;
+
 		zone = thermal_zone_device_register(child->name, tz->ntrips,
 						    mask, tz,
 						    ops, tzp,
@@ -1019,7 +1181,9 @@
 			kfree(ops);
 			of_thermal_free_zone(tz);
 			/* attempting to build remaining zones still */
+			continue;
 		}
+		tz->tzd = zone;
 	}
 	of_node_put(np);
 
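For reference, the window computed by of_thermal_aggregate_trip_types() above
reduces to a min/max scan over the trip points of every zone that shares the
sensor: the high threshold is the lowest trip above the current temperature,
the low threshold is the highest release point below it, and hysteresis is
applied on the side selected by tracks_low. A standalone sketch of that scan
for a single zone (plain C, illustrative only; the trip values in main() are
made up):

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct trip { int temperature; int hysteresis; };

	/* Mirror the per-zone part of the aggregation: pick the closest
	 * thresholds around 'temp'. The driver additionally walks every
	 * sibling zone tied to the same sensor.
	 */
	static void aggregate_window(const struct trip *trips, int ntrips,
				     int temp, bool tracks_low,
				     int *low, int *high)
	{
		int min = INT_MIN, max = INT_MAX;
		int i, tt, th;

		for (i = 0; i < ntrips; i++) {
			tt = trips[i].temperature;
			if (!tracks_low) {
				if (tt > temp && tt < max)
					max = tt;
				th = tt - trips[i].hysteresis;
				if (th < temp && th > min)
					min = th;
			} else {
				if (tt < temp && tt > min)
					min = tt;
				th = tt + trips[i].hysteresis;
				if (th > temp && th < max)
					max = th;
			}
		}
		*low = min;
		*high = max;
	}

	int main(void)
	{
		struct trip trips[] = { { 95000, 5000 }, { 115000, 5000 } };
		int low, high;

		aggregate_window(trips, 2, 80000, false, &low, &high);
		/* prints low=-2147483648 (no trip below) high=95000 */
		printf("low=%d high=%d\n", low, high);
		return 0;
	}
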
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index bcef2e7..4fa7f82 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -37,7 +37,7 @@
  *       for this trip point
  *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
  *       for this trip point
- * If the temperature is lower than a trip point,
+ * If the temperature is lower than the hysteresis temperature,
  *    a. if the trend is THERMAL_TREND_RAISING, do nothing
  *    b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
  *       state for this trip point, if the cooling state already
@@ -126,7 +126,7 @@
 
 static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 {
-	int trip_temp;
+	int trip_temp, hyst_temp;
 	enum thermal_trip_type trip_type;
 	enum thermal_trend trend;
 	struct thermal_instance *instance;
@@ -134,22 +134,24 @@
 	int old_target;
 
 	if (trip == THERMAL_TRIPS_NONE) {
-		trip_temp = tz->forced_passive;
+		hyst_temp = trip_temp = tz->forced_passive;
 		trip_type = THERMAL_TRIPS_NONE;
 	} else {
 		tz->ops->get_trip_temp(tz, trip, &trip_temp);
+		if (tz->ops->get_trip_hyst) {
+			tz->ops->get_trip_hyst(tz, trip, &hyst_temp);
+			hyst_temp = trip_temp - hyst_temp;
+		} else {
+			hyst_temp = trip_temp;
+		}
 		tz->ops->get_trip_type(tz, trip, &trip_type);
 	}
 
 	trend = get_tz_trend(tz, trip);
 
-	if (tz->temperature >= trip_temp) {
-		throttle = true;
-		trace_thermal_zone_trip(tz, trip, trip_type);
-	}
-
-	dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
-				trip, trip_type, trip_temp, trend, throttle);
+	dev_dbg(&tz->device,
+		"Trip%d[type=%d,temp=%d,hyst=%d]:trend=%d,throttle=%d\n",
+		trip, trip_type, trip_temp, hyst_temp, trend, throttle);
 
 	mutex_lock(&tz->lock);
 
@@ -158,6 +160,22 @@
 			continue;
 
 		old_target = instance->target;
+		/*
+		 * Step wise lowers the mitigation only when the temperature
+		 * drops below the hysteresis temperature. While the
+		 * temperature is still above the hysteresis temperature it
+		 * must at least hold the cooling device at its current
+		 * lower limit.
+		 */
+		if (tz->temperature >= trip_temp ||
+			(tz->temperature >= hyst_temp &&
+			 old_target != THERMAL_NO_TARGET)) {
+			throttle = true;
+			trace_thermal_zone_trip(tz, trip, trip_type);
+		} else {
+			throttle = false;
+		}
+
 		instance->target = get_target_state(instance, trend, throttle);
 		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
 					old_target, (int)instance->target);
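The decision added above can be read as a small predicate: throttle when the
temperature is at or above the trip, keep throttling while it stays at or
above the hysteresis point and a target is already set, and release only once
it falls below the hysteresis point. A standalone model of that predicate
(plain C, illustrative only; the temperatures in main() are made up and
THERMAL_NO_TARGET is redefined locally):

	#include <stdbool.h>
	#include <stdio.h>

	#define THERMAL_NO_TARGET	-1L	/* local stand-in */

	/* Model of the throttle decision in thermal_zone_trip_update(). */
	static bool should_throttle(int temp, int trip_temp, int hyst_temp,
				    long old_target)
	{
		return temp >= trip_temp ||
		       (temp >= hyst_temp && old_target != THERMAL_NO_TARGET);
	}

	int main(void)
	{
		/* trip at 95 degC, hysteresis point at 90 degC */
		printf("%d\n", should_throttle(96000, 95000, 90000,
					       THERMAL_NO_TARGET));	/* 1 */
		printf("%d\n", should_throttle(92000, 95000, 90000, 2));/* 1: hold */
		printf("%d\n", should_throttle(92000, 95000, 90000,
					       THERMAL_NO_TARGET));	/* 0 */
		printf("%d\n", should_throttle(88000, 95000, 90000, 2));/* 0: release */
		return 0;
	}
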
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 226b0b4ac..5b627ea 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -49,6 +49,8 @@
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
 MODULE_LICENSE("GPL v2");
 
+#define THERMAL_MAX_ACTIVE	16
+
 static DEFINE_IDR(thermal_tz_idr);
 static DEFINE_IDR(thermal_cdev_idr);
 static DEFINE_MUTEX(thermal_idr_lock);
@@ -64,6 +66,8 @@
 
 static struct thermal_governor *def_governor;
 
+static struct workqueue_struct *thermal_passive_wq;
+
 static struct thermal_governor *__find_governor(const char *name)
 {
 	struct thermal_governor *pos;
@@ -392,14 +396,15 @@
 	mutex_unlock(&thermal_list_lock);
 }
 
-static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
+static void thermal_zone_device_set_polling(struct workqueue_struct *queue,
+					    struct thermal_zone_device *tz,
 					    int delay)
 {
 	if (delay > 1000)
-		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+		mod_delayed_work(queue, &tz->poll_queue,
 				 round_jiffies(msecs_to_jiffies(delay)));
 	else if (delay)
-		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+		mod_delayed_work(queue, &tz->poll_queue,
 				 msecs_to_jiffies(delay));
 	else
 		cancel_delayed_work(&tz->poll_queue);
@@ -410,11 +415,13 @@
 	mutex_lock(&tz->lock);
 
 	if (tz->passive)
-		thermal_zone_device_set_polling(tz, tz->passive_delay);
+		thermal_zone_device_set_polling(thermal_passive_wq,
+						tz, tz->passive_delay);
 	else if (tz->polling_delay)
-		thermal_zone_device_set_polling(tz, tz->polling_delay);
+		thermal_zone_device_set_polling(system_freezable_wq,
+						tz, tz->polling_delay);
 	else
-		thermal_zone_device_set_polling(tz, 0);
+		thermal_zone_device_set_polling(NULL, tz, 0);
 
 	mutex_unlock(&tz->lock);
 }
@@ -450,7 +457,7 @@
 	}
 }
 
-static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
+void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
 {
 	enum thermal_trip_type type;
 
@@ -513,7 +520,6 @@
 		if (!ret && *temp < crit_temp)
 			*temp = tz->emul_temperature;
 	}
- 
 	mutex_unlock(&tz->lock);
 exit:
 	return ret;
@@ -1194,6 +1200,25 @@
 }
 
 static ssize_t
+thermal_cooling_device_min_state_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+	unsigned long state;
+	int ret;
+
+	if (cdev->ops->get_min_state)
+		ret = cdev->ops->get_min_state(cdev, &state);
+	else
+		ret = -EPERM;
+
+	if (ret)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", state);
+}
+
+static ssize_t
 thermal_cooling_device_cur_state_show(struct device *dev,
 				      struct device_attribute *attr, char *buf)
 {
@@ -1213,8 +1238,7 @@
 				       const char *buf, size_t count)
 {
 	struct thermal_cooling_device *cdev = to_cooling_device(dev);
-	unsigned long state;
-	int result;
+	long state;
 
 	if (!sscanf(buf, "%ld\n", &state))
 		return -EINVAL;
@@ -1222,9 +1246,35 @@
 	if ((long)state < 0)
 		return -EINVAL;
 
-	result = cdev->ops->set_cur_state(cdev, state);
-	if (result)
-		return result;
+	cdev->sysfs_cur_state_req = state;
+
+	cdev->updated = false;
+	thermal_cdev_update(cdev);
+
+	return count;
+}
+
+static ssize_t
+thermal_cooling_device_min_state_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct thermal_cooling_device *cdev = to_cooling_device(dev);
+	long state;
+	int ret = 0;
+
+	ret = sscanf(buf, "%ld\n", &state);
+	if (ret <= 0)
+		return (ret < 0) ? ret : -EINVAL;
+
+	if ((long)state < 0)
+		return -EINVAL;
+
+	cdev->sysfs_min_state_req = state;
+
+	cdev->updated = false;
+	thermal_cdev_update(cdev);
+
 	return count;
 }
 
@@ -1235,6 +1285,9 @@
 static DEVICE_ATTR(cur_state, 0644,
 		   thermal_cooling_device_cur_state_show,
 		   thermal_cooling_device_cur_state_store);
+static DEVICE_ATTR(min_state, 0644,
+		   thermal_cooling_device_min_state_show,
+		   thermal_cooling_device_min_state_store);
 
 static ssize_t
 thermal_cooling_device_trip_point_show(struct device *dev,
@@ -1255,6 +1308,7 @@
 	&dev_attr_cdev_type.attr,
 	&dev_attr_max_state.attr,
 	&dev_attr_cur_state.attr,
+	&dev_attr_min_state.attr,
 	NULL,
 };
 
@@ -1548,6 +1602,8 @@
 	cdev->device.class = &thermal_class;
 	cdev->device.groups = cooling_device_attr_groups;
 	cdev->devdata = devdata;
+	cdev->sysfs_cur_state_req = 0;
+	cdev->sysfs_min_state_req = ULONG_MAX;
 	dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
 	result = device_register(&cdev->device);
 	if (result) {
@@ -1682,7 +1738,7 @@
 void thermal_cdev_update(struct thermal_cooling_device *cdev)
 {
 	struct thermal_instance *instance;
-	unsigned long target = 0;
+	unsigned long current_target = 0, min_target = ULONG_MAX;
 
 	mutex_lock(&cdev->lock);
 	/* cooling device is updated*/
@@ -1692,19 +1748,29 @@
 	}
 
 	/* Make sure cdev enters the deepest cooling state */
+	current_target = cdev->sysfs_cur_state_req;
+	min_target = cdev->sysfs_min_state_req;
 	list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
 		dev_dbg(&cdev->device, "zone%d->target=%lu\n",
 				instance->tz->id, instance->target);
 		if (instance->target == THERMAL_NO_TARGET)
 			continue;
-		if (instance->target > target)
-			target = instance->target;
+		if (instance->tz->governor->min_state_throttle) {
+			if (instance->target < min_target)
+				min_target = instance->target;
+		} else {
+			if (instance->target > current_target)
+				current_target = instance->target;
+		}
 	}
-	cdev->ops->set_cur_state(cdev, target);
+	cdev->ops->set_cur_state(cdev, current_target);
+	if (cdev->ops->set_min_state)
+		cdev->ops->set_min_state(cdev, min_target);
 	cdev->updated = true;
 	mutex_unlock(&cdev->lock);
-	trace_cdev_update(cdev, target);
-	dev_dbg(&cdev->device, "set to state %lu\n", target);
+	trace_cdev_update(cdev, current_target);
+	dev_dbg(&cdev->device, "set to state %lu min state %lu\n",
+				current_target, min_target);
 }
 EXPORT_SYMBOL(thermal_cdev_update);
 
@@ -2069,7 +2135,7 @@
 
 	mutex_unlock(&thermal_list_lock);
 
-	thermal_zone_device_set_polling(tz, 0);
+	thermal_zone_device_set_polling(NULL, tz, 0);
 
 	if (tz->type[0])
 		device_remove_file(&tz->device, &dev_attr_type);
@@ -2172,6 +2238,8 @@
 	.n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps),
 };
 
+static int allow_netlink_events;
+
 int thermal_generate_netlink_event(struct thermal_zone_device *tz,
 					enum events event)
 {
@@ -2186,6 +2254,9 @@
 	if (!tz)
 		return -EINVAL;
 
+	if (!allow_netlink_events)
+		return -ENODEV;
+
 	/* allocate memory */
 	size = nla_total_size(sizeof(struct thermal_genl_event)) +
 	       nla_total_size(0);
@@ -2237,7 +2308,13 @@
 
 static int genetlink_init(void)
 {
-	return genl_register_family(&thermal_event_genl_family);
+	int ret;
+
+	ret = genl_register_family(&thermal_event_genl_family);
+	if (!ret)
+		allow_netlink_events = true;
+
+	return ret;
 }
 
 static void genetlink_exit(void)
@@ -2247,6 +2324,8 @@
 #else /* !CONFIG_NET */
 static inline int genetlink_init(void) { return 0; }
 static inline void genetlink_exit(void) {}
+static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+		enum events event) { return -ENODEV; }
 #endif /* !CONFIG_NET */
 
 static int __init thermal_register_governors(void)
@@ -2316,21 +2395,26 @@
 {
 	int result;
 
+	thermal_passive_wq = alloc_workqueue("thermal_passive_wq",
+						WQ_HIGHPRI | WQ_UNBOUND
+						| WQ_FREEZABLE,
+						THERMAL_MAX_ACTIVE);
+	if (!thermal_passive_wq) {
+		result = -ENOMEM;
+		goto init_exit;
+	}
+
 	result = thermal_register_governors();
 	if (result)
-		goto error;
+		goto destroy_wq;
 
 	result = class_register(&thermal_class);
 	if (result)
 		goto unregister_governors;
 
-	result = genetlink_init();
-	if (result)
-		goto unregister_class;
-
 	result = of_parse_thermal_zones();
 	if (result)
-		goto exit_netlink;
+		goto exit_zone_parse;
 
 	result = register_pm_notifier(&thermal_pm_nb);
 	if (result)
@@ -2339,13 +2423,13 @@
 
 	return 0;
 
-exit_netlink:
-	genetlink_exit();
-unregister_class:
+exit_zone_parse:
 	class_unregister(&thermal_class);
 unregister_governors:
 	thermal_unregister_governors();
-error:
+destroy_wq:
+	destroy_workqueue(thermal_passive_wq);
+init_exit:
 	idr_destroy(&thermal_tz_idr);
 	idr_destroy(&thermal_cdev_idr);
 	mutex_destroy(&thermal_idr_lock);
@@ -2358,6 +2442,7 @@
 {
 	unregister_pm_notifier(&thermal_pm_nb);
 	of_thermal_destroy_zones();
+	destroy_workqueue(thermal_passive_wq);
 	genetlink_exit();
 	class_unregister(&thermal_class);
 	thermal_unregister_governors();
@@ -2368,5 +2453,19 @@
 	mutex_destroy(&thermal_governor_lock);
 }
 
-fs_initcall(thermal_init);
+static int __init thermal_netlink_init(void)
+{
+	int ret = 0;
+
+	ret = genetlink_init();
+	if (!ret)
+		goto exit_netlink;
+
+	thermal_exit();
+exit_netlink:
+	return ret;
+}
+
+subsys_initcall(thermal_init);
+fs_initcall(thermal_netlink_init);
 module_exit(thermal_exit);
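thermal_cdev_update() above now starts the max-aggregation from the sysfs
cur_state request and the min-aggregation from the sysfs min_state request,
then folds in every instance target: governors flagged with min_state_throttle
contribute to the min side, all others to the max side. A standalone sketch of
that fold (plain C, illustrative only; the instance values in main() are made
up):

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NO_TARGET	ULONG_MAX	/* models THERMAL_NO_TARGET */

	struct instance { unsigned long target; bool min_state_throttle; };

	/* Mirror the aggregation in thermal_cdev_update(): the deepest
	 * (largest) regular target wins, the shallowest (smallest)
	 * min-state target wins.
	 */
	static void aggregate(const struct instance *inst, int n,
			      unsigned long sysfs_cur_req,
			      unsigned long sysfs_min_req,
			      unsigned long *cur, unsigned long *min)
	{
		unsigned long current_target = sysfs_cur_req;
		unsigned long min_target = sysfs_min_req;
		int i;

		for (i = 0; i < n; i++) {
			if (inst[i].target == NO_TARGET)
				continue;
			if (inst[i].min_state_throttle) {
				if (inst[i].target < min_target)
					min_target = inst[i].target;
			} else {
				if (inst[i].target > current_target)
					current_target = inst[i].target;
			}
		}
		*cur = current_target;
		*min = min_target;
	}

	int main(void)
	{
		struct instance inst[] = {
			{ 3, false }, { NO_TARGET, false }, { 5, true },
		};
		unsigned long cur, min;

		aggregate(inst, 3, 2, ULONG_MAX, &cur, &min);
		printf("cur=%lu min=%lu\n", cur, min);	/* cur=3 min=5 */
		return 0;
	}
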
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 749d41a..9408f3f 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -56,6 +56,7 @@
 
 int thermal_register_governor(struct thermal_governor *);
 void thermal_unregister_governor(struct thermal_governor *);
+void handle_thermal_trip(struct thermal_zone_device *tz, int trip);
 
 #ifdef CONFIG_THERMAL_GOV_STEP_WISE
 int thermal_gov_step_wise_register(void);
@@ -105,6 +106,10 @@
 bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
 const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *);
+int of_thermal_aggregate_trip(struct thermal_zone_device *tz,
+			      enum thermal_trip_type type,
+			      int *low, int *high);
+void of_thermal_handle_trip(struct thermal_zone_device *tz);
 #else
 static inline int of_parse_thermal_zones(void) { return 0; }
 static inline void of_thermal_destroy_zones(void) { }
@@ -122,6 +127,15 @@
 {
 	return NULL;
 }
+static inline int of_thermal_aggregate_trip(struct thermal_zone_device *tz,
+					    enum thermal_trip_type type,
+					    int *low, int *high)
+{
+	return -ENODEV;
+}
+static inline
+void of_thermal_handle_trip(struct thermal_zone_device *tz)
+{ }
 #endif
 
 #endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/tsens-dbg.c b/drivers/thermal/tsens-dbg.c
new file mode 100644
index 0000000..d965a5c
--- /dev/null
+++ b/drivers/thermal/tsens-dbg.c
@@ -0,0 +1,104 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/arch_timer.h>
+#include "tsens.h"
+
+/* debug defines */
+#define	TSENS_DBG_BUS_ID_0			0
+#define	TSENS_DBG_BUS_ID_1			1
+#define	TSENS_DBG_BUS_ID_2			2
+#define	TSENS_DBG_BUS_ID_15			15
+#define	TSENS_DEBUG_LOOP_COUNT_ID_0		2
+#define	TSENS_DEBUG_LOOP_COUNT			5
+#define	TSENS_DEBUG_STATUS_REG_START		10
+#define	TSENS_DEBUG_OFFSET_RANGE		16
+#define	TSENS_DEBUG_OFFSET_WORD1		0x4
+#define	TSENS_DEBUG_OFFSET_WORD2		0x8
+#define	TSENS_DEBUG_OFFSET_WORD3		0xc
+#define	TSENS_DEBUG_OFFSET_ROW			0x10
+#define	TSENS_DEBUG_DECIDEGC			-950
+#define	TSENS_DEBUG_CYCLE_MS			64
+#define	TSENS_DEBUG_POLL_MS			200
+#define	TSENS_DEBUG_BUS_ID2_MIN_CYCLE		50
+#define	TSENS_DEBUG_BUS_ID2_MAX_CYCLE		51
+#define	TSENS_DEBUG_ID_MASK_1_4			0xffffffe1
+#define	DEBUG_SIZE				10
+
+#define TSENS_DEBUG_CONTROL(n)			((n) + 0x1130)
+#define TSENS_DEBUG_DATA(n)			((n) + 0x1134)
+
+struct tsens_dbg_func {
+	int (*dbg_func)(struct tsens_device *, u32, u32, int *);
+};
+
+static int tsens_dbg_log_temp_reads(struct tsens_device *data, u32 id,
+					u32 dbg_type, int *temp)
+{
+	struct tsens_sensor *sensor;
+	struct tsens_device *tmdev = NULL;
+	u32 idx = 0;
+
+	if (!data)
+		return -EINVAL;
+
+	pr_debug("%d %d\n", id, dbg_type);
+	tmdev = data;
+	sensor = &tmdev->sensor[id];
+	idx = tmdev->tsens_dbg.sensor_dbg_info[sensor->hw_id].idx;
+	tmdev->tsens_dbg.sensor_dbg_info[sensor->hw_id].temp[idx%10] = *temp;
+	tmdev->tsens_dbg.sensor_dbg_info[sensor->hw_id].time_stmp[idx%10] =
+					sched_clock();
+	idx++;
+	tmdev->tsens_dbg.sensor_dbg_info[sensor->hw_id].idx = idx;
+
+	return 0;
+}
+
+static int tsens_dbg_log_interrupt_timestamp(struct tsens_device *data,
+						u32 id, u32 dbg_type, int *val)
+{
+	struct tsens_device *tmdev = NULL;
+	u32 idx = 0;
+
+	if (!data)
+		return -EINVAL;
+
+	pr_debug("%d %d\n", id, dbg_type);
+	tmdev = data;
+	/* debug */
+	idx = tmdev->tsens_dbg.tsens_thread_iq_dbg.idx;
+	tmdev->tsens_dbg.tsens_thread_iq_dbg.dbg_count[idx%10]++;
+	tmdev->tsens_dbg.tsens_thread_iq_dbg.time_stmp[idx%10] =
+							sched_clock();
+	tmdev->tsens_dbg.tsens_thread_iq_dbg.idx++;
+
+	return 0;
+}
+
+static struct tsens_dbg_func dbg_arr[] = {
+	[TSENS_DBG_LOG_TEMP_READS] = {tsens_dbg_log_temp_reads},
+	[TSENS_DBG_LOG_INTERRUPT_TIMESTAMP] = {
+			tsens_dbg_log_interrupt_timestamp},
+};
+
+int tsens2xxx_dbg(struct tsens_device *data, u32 id, u32 dbg_type, int *val)
+{
+	if (dbg_type >= TSENS_DBG_LOG_MAX)
+		return -EINVAL;
+
+	dbg_arr[dbg_type].dbg_func(data, id, dbg_type, val);
+
+	return 0;
+}
+EXPORT_SYMBOL(tsens2xxx_dbg);
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
new file mode 100644
index 0000000..45d244e
--- /dev/null
+++ b/drivers/thermal/tsens.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_TSENS_H__
+#define __QCOM_TSENS_H__
+
+#include <linux/kernel.h>
+#include <linux/thermal.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#define DEBUG_SIZE				10
+#define TSENS_MAX_SENSORS			16
+#define TSENS_CONTROLLER_ID(n)			((n) + 0x1000)
+#define TSENS_CTRL_ADDR(n)			(n)
+#define TSENS_TM_SN_STATUS(n)			((n) + 0x10a0)
+
+enum tsens_dbg_type {
+	TSENS_DBG_POLL,
+	TSENS_DBG_LOG_TEMP_READS,
+	TSENS_DBG_LOG_INTERRUPT_TIMESTAMP,
+	TSENS_DBG_LOG_MAX
+};
+
+#define tsens_sec_to_msec_value		1000
+
+struct tsens_device;
+
+#if defined(CONFIG_THERMAL_TSENS)
+int tsens2xxx_dbg(struct tsens_device *data, u32 id, u32 dbg_type, int *temp);
+#else
+static inline int tsens2xxx_dbg(struct tsens_device *data, u32 id,
+						u32 dbg_type, int *temp)
+{ return -ENXIO; }
+#endif
+
+struct tsens_dbg {
+	u32				dbg_count[DEBUG_SIZE];
+	u32				idx;
+	unsigned long long		time_stmp[DEBUG_SIZE];
+	unsigned long			temp[DEBUG_SIZE];
+};
+
+struct tsens_dbg_context {
+	struct tsens_device		*tmdev;
+	struct tsens_dbg		tsens_thread_iq_dbg;
+	struct tsens_dbg		sensor_dbg_info[TSENS_MAX_SENSORS];
+	int				tsens_critical_wd_cnt;
+	struct delayed_work		tsens_critical_poll_test;
+};
+
+struct tsens_context {
+	enum thermal_device_mode	high_th_state;
+	enum thermal_device_mode	low_th_state;
+	enum thermal_device_mode	crit_th_state;
+	int				high_temp;
+	int				low_temp;
+	int				crit_temp;
+};
+
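+/**
+ * struct tsens_sensor - per-sensor context
+ * @tmdev: tsens controller this sensor belongs to
+ * @tzd: thermal zone device registered for this sensor
+ * @hw_id: hardware sensor index within the controller
+ * @id: logical sensor id
+ * @sensor_name: optional sensor name
+ * @thr_state: cached trip threshold state
+ */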
+struct tsens_sensor {
+	struct tsens_device		*tmdev;
+	struct thermal_zone_device	*tzd;
+	u32				hw_id;
+	u32				id;
+	const char			*sensor_name;
+	struct tsens_context		thr_state;
+};
+
+/**
+ * struct tsens_ops - operations supported by the tsens device
+ * @hw_init: initialize the tsens controller hardware
+ * @get_temp: return the sensor temperature in millidegrees Celsius
+ * @set_trip_temp: program a trip point threshold for a sensor
+ * @interrupts_reg: register the controller interrupt handlers
+ * @dbg: optional debug logging hook
+ */
+struct tsens_ops {
+	int (*hw_init)(struct tsens_device *);
+	int (*get_temp)(struct tsens_sensor *, int *);
+	int (*set_trip_temp)(struct tsens_sensor *, int, int);
+	int (*interrupts_reg)(struct tsens_device *);
+	int (*dbg)(struct tsens_device *, u32, u32, int *);
+};
+
+struct tsens_irqs {
+	const char			*name;
+	irqreturn_t (*handler)(int, void *);
+};
+
+/**
+ * struct tsens_data - tsens controller specific data
+ * @num_sensors: maximum number of sensors supported by the controller
+ * @ops: operations the tsens controller supports
+ * @hw_ids: subset of sensor ids supported, if not the first @num_sensors
+ * @temp_factor: platform specific temperature scaling factor
+ * @cycle_monitor: true if cycle completion monitoring is supported
+ * @cycle_compltn_monitor_val: cycle completion monitor mask value
+ * @wd_bark: true if the watchdog bark interrupt is supported
+ * @wd_bark_val: watchdog bark configuration value
+ */
+struct tsens_data {
+	const u32			num_sensors;
+	const struct tsens_ops		*ops;
+	unsigned int			*hw_ids;
+	u32				temp_factor;
+	bool				cycle_monitor;
+	u32				cycle_compltn_monitor_val;
+	bool				wd_bark;
+	u32				wd_bark_val;
+};
+
+struct tsens_device {
+	struct device			*dev;
+	struct platform_device		*pdev;
+	struct list_head		list;
+	u32				num_sensors;
+	struct regmap			*map;
+	struct regmap_field		*status_field;
+	void				*tsens_addr;
+	const struct tsens_ops		*ops;
+	struct tsens_dbg_context	tsens_dbg;
+	spinlock_t			tsens_crit_lock;
+	spinlock_t			tsens_upp_low_lock;
+	const struct tsens_data		*ctrl_data;
+	struct tsens_sensor		sensor[0];
+};
+
+extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
+
+#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
new file mode 100644
index 0000000..0f59dc5
--- /dev/null
+++ b/drivers/thermal/tsens2xxx.c
@@ -0,0 +1,543 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/vmalloc.h>
+#include "tsens.h"
+
+#define TSENS_DRIVER_NAME			"msm-tsens"
+
+#define TSENS_TM_INT_EN(n)			((n) + 0x1004)
+#define TSENS_TM_CRITICAL_INT_STATUS(n)		((n) + 0x1014)
+#define TSENS_TM_CRITICAL_INT_CLEAR(n)		((n) + 0x1018)
+#define TSENS_TM_CRITICAL_INT_MASK(n)		((n) + 0x101c)
+#define TSENS_TM_CRITICAL_WD_BARK		BIT(31)
+#define TSENS_TM_CRITICAL_CYCLE_MONITOR		BIT(30)
+#define TSENS_TM_CRITICAL_INT_EN		BIT(2)
+#define TSENS_TM_UPPER_INT_EN			BIT(1)
+#define TSENS_TM_LOWER_INT_EN			BIT(0)
+#define TSENS_TM_SN_UPPER_LOWER_THRESHOLD(n)	((n) + 0x1020)
+#define TSENS_TM_SN_ADDR_OFFSET			0x4
+#define TSENS_TM_UPPER_THRESHOLD_SET(n)		((n) << 12)
+#define TSENS_TM_UPPER_THRESHOLD_VALUE_SHIFT(n)	((n) >> 12)
+#define TSENS_TM_LOWER_THRESHOLD_VALUE(n)	((n) & 0xfff)
+#define TSENS_TM_UPPER_THRESHOLD_VALUE(n)	(((n) & 0xfff000) >> 12)
+#define TSENS_TM_UPPER_THRESHOLD_MASK		0xfff000
+#define TSENS_TM_LOWER_THRESHOLD_MASK		0xfff
+#define TSENS_TM_UPPER_THRESHOLD_SHIFT		12
+#define TSENS_TM_SN_CRITICAL_THRESHOLD(n)	((n) + 0x1060)
+#define TSENS_STATUS_ADDR_OFFSET		2
+#define TSENS_TM_UPPER_INT_MASK(n)		(((n) & 0xffff0000) >> 16)
+#define TSENS_TM_LOWER_INT_MASK(n)		((n) & 0xffff)
+#define TSENS_TM_UPPER_LOWER_INT_STATUS(n)	((n) + 0x1008)
+#define TSENS_TM_UPPER_LOWER_INT_CLEAR(n)	((n) + 0x100c)
+#define TSENS_TM_UPPER_LOWER_INT_MASK(n)	((n) + 0x1010)
+#define TSENS_TM_UPPER_INT_SET(n)		(1 << ((n) + 16))
+#define TSENS_TM_SN_CRITICAL_THRESHOLD_MASK	0xfff
+#define TSENS_TM_SN_STATUS_VALID_BIT		BIT(21)
+#define TSENS_TM_SN_STATUS_CRITICAL_STATUS	BIT(19)
+#define TSENS_TM_SN_STATUS_UPPER_STATUS		BIT(18)
+#define TSENS_TM_SN_STATUS_LOWER_STATUS		BIT(17)
+#define TSENS_TM_SN_LAST_TEMP_MASK		0xfff
+#define TSENS_TM_CODE_BIT_MASK			0xfff
+#define TSENS_TM_CODE_SIGN_BIT			0x800
+
+#define TSENS_EN				BIT(0)
+
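+/*
+ * Convert a raw 12-bit two's-complement TSENS code (0.1 degC units) to
+ * millidegrees Celsius. For example, a code of 0xfce has the sign bit
+ * (0x800) set, sign-extends to -50 and is reported as -5000 mdegC.
+ */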
+static void msm_tsens_convert_temp(int last_temp, int *temp)
+{
+	int code_mask = ~TSENS_TM_CODE_BIT_MASK;
+
+	if (last_temp & TSENS_TM_CODE_SIGN_BIT) {
+		/* Sign extension for negative value */
+		last_temp |= code_mask;
+	}
+
+	*temp = last_temp * 100;
+}
+
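+/*
+ * Read the last latched temperature for a sensor. The status register is
+ * read up to three times waiting for the VALID bit; if it never latches,
+ * agreement between consecutive readings is used to pick a stable value.
+ */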
+static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
+{
+	struct tsens_device *tmdev = NULL;
+	unsigned int code;
+	void __iomem *sensor_addr;
+	int last_temp = 0, last_temp2 = 0, last_temp3 = 0;
+
+	if (!sensor)
+		return -EINVAL;
+
+	tmdev = sensor->tmdev;
+	sensor_addr = TSENS_TM_SN_STATUS(tmdev->tsens_addr);
+
+	code = readl_relaxed_no_log(sensor_addr +
+			(sensor->hw_id << TSENS_STATUS_ADDR_OFFSET));
+	last_temp = code & TSENS_TM_SN_LAST_TEMP_MASK;
+
+	if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
+		msm_tsens_convert_temp(last_temp, temp);
+		return 0;
+	}
+
+	code = readl_relaxed_no_log(sensor_addr +
+		(sensor->hw_id << TSENS_STATUS_ADDR_OFFSET));
+	last_temp2 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
+		last_temp = last_temp2;
+		msm_tsens_convert_temp(last_temp, temp);
+		return 0;
+	}
+
+	code = readl_relaxed_no_log(sensor_addr +
+			(sensor->hw_id <<
+			TSENS_STATUS_ADDR_OFFSET));
+	last_temp3 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+	if (code & TSENS_TM_SN_STATUS_VALID_BIT) {
+		last_temp = last_temp3;
+		msm_tsens_convert_temp(last_temp, temp);
+		return 0;
+	}
+
+	if (last_temp == last_temp2)
+		last_temp = last_temp2;
+	else if (last_temp2 == last_temp3)
+		last_temp = last_temp3;
+
+	msm_tsens_convert_temp(last_temp, temp);
+
+	if (tmdev->ops->dbg)
+		tmdev->ops->dbg(tmdev, (u32) sensor->hw_id,
+					TSENS_DBG_LOG_TEMP_READS, temp);
+
+	return 0;
+}
+
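+/*
+ * Mask or unmask the critical/upper/lower threshold interrupt for a
+ * sensor when the corresponding trip point is activated or disabled.
+ */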
+static int tsens_tm_activate_trip_type(struct tsens_sensor *tm_sensor,
+			int trip, enum thermal_trip_activation_mode mode)
+{
+	struct tsens_device *tmdev = NULL;
+	unsigned int reg_cntl, mask;
+	unsigned long flags;
+	int rc = 0;
+
+	/* clear the interrupt and unmask */
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tmdev;
+	if (!tmdev)
+		return -EINVAL;
+
+	spin_lock_irqsave(&tmdev->tsens_upp_low_lock, flags);
+	mask = (tm_sensor->hw_id);
+	switch (trip) {
+	case THERMAL_TRIP_CRITICAL:
+		tmdev->sensor[tm_sensor->hw_id].
+			thr_state.crit_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_CRITICAL_INT_MASK
+							(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl | (1 << mask),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl & ~(1 << mask),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_addr)));
+		break;
+	case THERMAL_TRIP_ACTIVE:
+		tmdev->sensor[tm_sensor->hw_id].
+			thr_state.high_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_UPPER_LOWER_INT_MASK
+						(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl |
+				(TSENS_TM_UPPER_INT_SET(mask)),
+				(TSENS_TM_UPPER_LOWER_INT_MASK
+				(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl &
+				~(TSENS_TM_UPPER_INT_SET(mask)),
+				(TSENS_TM_UPPER_LOWER_INT_MASK
+				(tmdev->tsens_addr)));
+		break;
+	case THERMAL_TRIP_PASSIVE:
+		tmdev->sensor[tm_sensor->hw_id].
+			thr_state.low_th_state = mode;
+		reg_cntl = readl_relaxed(TSENS_TM_UPPER_LOWER_INT_MASK
+						(tmdev->tsens_addr));
+		if (mode == THERMAL_TRIP_ACTIVATION_DISABLED)
+			writel_relaxed(reg_cntl | (1 << mask),
+			(TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_addr)));
+		else
+			writel_relaxed(reg_cntl & ~(1 << mask),
+			(TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_addr)));
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
+	/* Activate and enable the respective trip threshold setting */
+	mb();
+
+	return rc;
+}
+
+static int tsens2xxx_set_trip_temp(struct tsens_sensor *tm_sensor,
+							int trip, int temp)
+{
+	unsigned int reg_cntl;
+	unsigned long flags;
+	struct tsens_device *tmdev = NULL;
+	int rc = 0;
+
+	if (!tm_sensor || trip < 0)
+		return -EINVAL;
+
+	tmdev = tm_sensor->tmdev;
+	if (!tmdev)
+		return -EINVAL;
+
+	spin_lock_irqsave(&tmdev->tsens_upp_low_lock, flags);
+	switch (trip) {
+	case THERMAL_TRIP_CRITICAL:
+		tmdev->sensor[tm_sensor->hw_id].
+				thr_state.crit_temp = temp;
+		temp &= TSENS_TM_SN_CRITICAL_THRESHOLD_MASK;
+		writel_relaxed(temp,
+			(TSENS_TM_SN_CRITICAL_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
+		break;
+	case THERMAL_TRIP_ACTIVE:
+		tmdev->sensor[tm_sensor->hw_id].
+				thr_state.high_temp = temp;
+		reg_cntl = readl_relaxed((TSENS_TM_SN_UPPER_LOWER_THRESHOLD
+				(tmdev->tsens_addr)) +
+				(tm_sensor->hw_id *
+				TSENS_TM_SN_ADDR_OFFSET));
+		temp = TSENS_TM_UPPER_THRESHOLD_SET(temp);
+		temp &= TSENS_TM_UPPER_THRESHOLD_MASK;
+		reg_cntl &= ~TSENS_TM_UPPER_THRESHOLD_MASK;
+		writel_relaxed(reg_cntl | temp,
+			(TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
+		break;
+	case THERMAL_TRIP_PASSIVE:
+		tmdev->sensor[tm_sensor->hw_id].
+				thr_state.low_temp = temp;
+		reg_cntl = readl_relaxed((TSENS_TM_SN_UPPER_LOWER_THRESHOLD
+				(tmdev->tsens_addr)) +
+				(tm_sensor->hw_id *
+				TSENS_TM_SN_ADDR_OFFSET));
+		temp &= TSENS_TM_LOWER_THRESHOLD_MASK;
+		reg_cntl &= ~TSENS_TM_LOWER_THRESHOLD_MASK;
+		writel_relaxed(reg_cntl | temp,
+			(TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tmdev->tsens_addr) +
+			(tm_sensor->hw_id * TSENS_TM_SN_ADDR_OFFSET)));
+		break;
+	default:
+		pr_err("Invalid trip to TSENS: %d\n", trip);
+		rc = -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&tmdev->tsens_upp_low_lock, flags);
+	/* Set trip temperature thresholds */
+	mb();
+
+	rc = tsens_tm_activate_trip_type(tm_sensor, trip,
+				THERMAL_TRIP_ACTIVATION_ENABLED);
+	if (rc)
+		pr_err("Error during trip activation: %d\n", rc);
+
+	return rc;
+}
+
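+/*
+ * Threaded handler for the critical interrupt: service a watchdog bark
+ * when supported, otherwise mask and clear the critical interrupt for
+ * each sensor whose critical threshold has tripped.
+ */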
+static irqreturn_t tsens_tm_critical_irq_thread(int irq, void *data)
+{
+	struct tsens_device *tm = data;
+	unsigned int i, status;
+	unsigned long flags;
+	void __iomem *sensor_status_addr;
+	void __iomem *sensor_int_mask_addr;
+	void __iomem *sensor_critical_addr;
+	void __iomem *wd_critical_addr;
+	int wd_mask;
+
+	sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_addr);
+	sensor_int_mask_addr =
+		TSENS_TM_CRITICAL_INT_MASK(tm->tsens_addr);
+	sensor_critical_addr =
+		TSENS_TM_SN_CRITICAL_THRESHOLD(tm->tsens_addr);
+	wd_critical_addr =
+		TSENS_TM_CRITICAL_INT_STATUS(tm->tsens_addr);
+
+	if (tm->ctrl_data->wd_bark) {
+		wd_mask = readl_relaxed(wd_critical_addr);
+		if (wd_mask & TSENS_TM_CRITICAL_WD_BARK) {
+			/*
+			 * Clear watchdog interrupt and
+			 * increment global wd count
+			 */
+			writel_relaxed(wd_mask | TSENS_TM_CRITICAL_WD_BARK,
+				(TSENS_TM_CRITICAL_INT_CLEAR
+				(tm->tsens_addr)));
+			writel_relaxed(wd_mask & ~(TSENS_TM_CRITICAL_WD_BARK),
+				(TSENS_TM_CRITICAL_INT_CLEAR
+				(tm->tsens_addr)));
+			tm->tsens_dbg.tsens_critical_wd_cnt++;
+			return IRQ_HANDLED;
+		}
+	}
+
+	for (i = 0; i < tm->num_sensors; i++) {
+		int int_mask, int_mask_val;
+		u32 addr_offset;
+
+		spin_lock_irqsave(&tm->tsens_crit_lock, flags);
+		addr_offset = tm->sensor[i].hw_id *
+						TSENS_TM_SN_ADDR_OFFSET;
+		status = readl_relaxed(sensor_status_addr + addr_offset);
+		int_mask = readl_relaxed(sensor_int_mask_addr);
+
+		if ((status & TSENS_TM_SN_STATUS_CRITICAL_STATUS) &&
+			!(int_mask & (1 << tm->sensor[i].hw_id))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = (1 << tm->sensor[i].hw_id);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_CRITICAL_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_CRITICAL_INT_CLEAR(tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_CRITICAL_INT_CLEAR(
+					tm->tsens_addr));
+			tm->sensor[i].thr_state.
+					crit_th_state = THERMAL_DEVICE_DISABLED;
+		}
+		spin_unlock_irqrestore(&tm->tsens_crit_lock, flags);
+	}
+
+	/* Mask critical interrupt */
+	mb();
+
+	return IRQ_HANDLED;
+}
+
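+/*
+ * Threaded handler for the upper/lower interrupt: mask and clear the
+ * tripped threshold interrupt for each affected sensor, log the trigger
+ * and record the interrupt timestamp via the debug hook.
+ */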
+static irqreturn_t tsens_tm_irq_thread(int irq, void *data)
+{
+	struct tsens_device *tm = data;
+	unsigned int i, status, threshold;
+	unsigned long flags;
+	void __iomem *sensor_status_addr;
+	void __iomem *sensor_int_mask_addr;
+	void __iomem *sensor_upper_lower_addr;
+	u32 addr_offset = 0;
+
+	sensor_status_addr = TSENS_TM_SN_STATUS(tm->tsens_addr);
+	sensor_int_mask_addr =
+		TSENS_TM_UPPER_LOWER_INT_MASK(tm->tsens_addr);
+	sensor_upper_lower_addr =
+		TSENS_TM_SN_UPPER_LOWER_THRESHOLD(tm->tsens_addr);
+
+	for (i = 0; i < tm->num_sensors; i++) {
+		bool upper_thr = false, lower_thr = false;
+		int int_mask, int_mask_val = 0;
+
+		spin_lock_irqsave(&tm->tsens_upp_low_lock, flags);
+		addr_offset = tm->sensor[i].hw_id *
+						TSENS_TM_SN_ADDR_OFFSET;
+		status = readl_relaxed(sensor_status_addr + addr_offset);
+		threshold = readl_relaxed(sensor_upper_lower_addr +
+								addr_offset);
+		int_mask = readl_relaxed(sensor_int_mask_addr);
+
+		if ((status & TSENS_TM_SN_STATUS_UPPER_STATUS) &&
+			!(int_mask &
+				(1 << (tm->sensor[i].hw_id + 16)))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = TSENS_TM_UPPER_INT_SET(
+					tm->sensor[i].hw_id);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			upper_thr = true;
+			tm->sensor[i].thr_state.
+					high_th_state = THERMAL_DEVICE_DISABLED;
+		}
+
+		if ((status & TSENS_TM_SN_STATUS_LOWER_STATUS) &&
+			!(int_mask &
+				(1 << tm->sensor[i].hw_id))) {
+			int_mask = readl_relaxed(sensor_int_mask_addr);
+			int_mask_val = (1 << tm->sensor[i].hw_id);
+			/* Mask the corresponding interrupt for the sensors */
+			writel_relaxed(int_mask | int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_MASK(
+					tm->tsens_addr));
+			/* Clear the corresponding sensors interrupt */
+			writel_relaxed(int_mask_val,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			writel_relaxed(0,
+				TSENS_TM_UPPER_LOWER_INT_CLEAR(
+					tm->tsens_addr));
+			lower_thr = true;
+			tm->sensor[i].thr_state.
+					low_th_state = THERMAL_DEVICE_DISABLED;
+		}
+		spin_unlock_irqrestore(&tm->tsens_upp_low_lock, flags);
+
+		if (upper_thr || lower_thr) {
+			int temp;
+			enum thermal_trip_type trip =
+					THERMAL_TRIP_CONFIGURABLE_LOW;
+
+			if (upper_thr)
+				trip = THERMAL_TRIP_CONFIGURABLE_HI;
+			tsens2xxx_get_temp(&tm->sensor[i], &temp);
+			/* Use id for multiple controllers */
+			pr_debug("sensor:%d trigger temp (%d degC)\n",
+				tm->sensor[i].hw_id,
+				(status & TSENS_TM_SN_LAST_TEMP_MASK));
+		}
+	}
+
+	/* Disable monitoring sensor trip threshold for triggered sensor */
+	mb();
+
+	if (tm->ops->dbg)
+		tm->ops->dbg(tm, 0, TSENS_DBG_LOG_INTERRUPT_TIMESTAMP, NULL);
+
+	return IRQ_HANDLED;
+}
+
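+/*
+ * Verify the controller is enabled, program the cycle completion monitor
+ * bits in the critical interrupt mask when supported, enable the
+ * critical/upper/lower interrupts and initialize the interrupt spinlocks.
+ */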
+static int tsens2xxx_hw_init(struct tsens_device *tmdev)
+{
+	void __iomem *srot_addr;
+	void __iomem *sensor_int_mask_addr;
+	unsigned int srot_val;
+	int crit_mask;
+
+	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_addr + 0x4);
+	srot_val = readl_relaxed(srot_addr);
+	if (!(srot_val & TSENS_EN)) {
+		pr_err("TSENS device is not enabled\n");
+		return -ENODEV;
+	}
+
+	if (tmdev->ctrl_data->cycle_monitor) {
+		sensor_int_mask_addr =
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_addr);
+		crit_mask = readl_relaxed(sensor_int_mask_addr);
+		writel_relaxed(
+			crit_mask | tmdev->ctrl_data->cycle_compltn_monitor_val,
+			(TSENS_TM_CRITICAL_INT_MASK
+			(tmdev->tsens_addr)));
+		/* Update critical cycle monitoring */
+		mb();
+	}
+	writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
+		TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
+		TSENS_TM_INT_EN(tmdev->tsens_addr));
+
+	spin_lock_init(&tmdev->tsens_crit_lock);
+	spin_lock_init(&tmdev->tsens_upp_low_lock);
+
+	return 0;
+}
+
+static const struct tsens_irqs tsens2xxx_irqs[] = {
+	{ "tsens-upper-lower", tsens_tm_irq_thread},
+	{ "tsens-critical", tsens_tm_critical_irq_thread},
+};
+
+static int tsens2xxx_register_interrupts(struct tsens_device *tmdev)
+{
+	struct platform_device *pdev;
+	int i, rc;
+
+	if (!tmdev)
+		return -EINVAL;
+
+	pdev = tmdev->pdev;
+
+	for (i = 0; i < ARRAY_SIZE(tsens2xxx_irqs); i++) {
+		int irq;
+
+		irq = platform_get_irq_byname(pdev, tsens2xxx_irqs[i].name);
+		if (irq < 0) {
+			dev_err(&pdev->dev, "failed to get irq %s\n",
+					tsens2xxx_irqs[i].name);
+			return irq;
+		}
+
+		rc = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+				tsens2xxx_irqs[i].handler,
+				IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+				tsens2xxx_irqs[i].name, tmdev);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request irq %s\n",
+					tsens2xxx_irqs[i].name);
+			return rc;
+		}
+		enable_irq_wake(irq);
+	}
+
+	return 0;
+}
+
+static const struct tsens_ops ops_tsens2xxx = {
+	.hw_init	= tsens2xxx_hw_init,
+	.get_temp	= tsens2xxx_get_temp,
+	.set_trip_temp	= tsens2xxx_set_trip_temp,
+	.interrupts_reg	= tsens2xxx_register_interrupts,
+	.dbg		= tsens2xxx_dbg,
+};
+
+const struct tsens_data data_tsens2xxx = {
+	.cycle_monitor			= false,
+	.cycle_compltn_monitor_val	= 0,
+	.wd_bark			= false,
+	.wd_bark_val			= 0,
+	.ops				= &ops_tsens2xxx,
+};
+
+const struct tsens_data data_tsens23xx = {
+	.cycle_monitor			= true,
+	.cycle_compltn_monitor_val	= 0,
+	.wd_bark			= true,
+	.wd_bark_val			= 0,
+	.ops				= &ops_tsens2xxx,
+};
+
+const struct tsens_data data_tsens24xx = {
+	.cycle_monitor			= true,
+	.cycle_compltn_monitor_val	= 0,
+	.wd_bark			= true,
+	.wd_bark_val			= 1,
+	.ops				= &ops_tsens2xxx,
+};
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index a115f58..4d4fdf4 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -848,7 +848,7 @@
 	unsigned int stop_bit_len;
 	unsigned int rxstale;
 	unsigned int clk_div;
-	unsigned long ser_clk_cfg;
+	unsigned long ser_clk_cfg = 0;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 	unsigned long clk_rate;
 
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index a6c1fae..a391b50 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1380,7 +1380,7 @@
 
 	dev_dbg(&intf->dev, "%s called\n", __func__);
 
-	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
@@ -1443,6 +1443,13 @@
 			break;
 		}
 	}
+
+	if (!data->bulk_out || !data->bulk_in) {
+		dev_err(&intf->dev, "bulk endpoints not found\n");
+		retcode = -ENODEV;
+		goto err_put;
+	}
+
 	/* Find int endpoint */
 	for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
 		endpoint = &iface_desc->endpoint[n].desc;
@@ -1468,8 +1475,10 @@
 	if (data->iin_ep_present) {
 		/* allocate int urb */
 		data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!data->iin_urb)
+		if (!data->iin_urb) {
+			retcode = -ENOMEM;
 			goto error_register;
+		}
 
 		/* will reference data in int urb */
 		kref_get(&data->kref);
@@ -1477,8 +1486,10 @@
 		/* allocate buffer for interrupt in */
 		data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
 					GFP_KERNEL);
-		if (!data->iin_buffer)
+		if (!data->iin_buffer) {
+			retcode = -ENOMEM;
 			goto error_register;
+		}
 
 		/* fill interrupt urb */
 		usb_fill_int_urb(data->iin_urb, data->usb_dev,
@@ -1511,6 +1522,7 @@
 	sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
 	sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
 	usbtmc_free_int(data);
+err_put:
 	kref_put(&data->kref, usbtmc_delete);
 	return retcode;
 }
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 1f7036c..eef716b 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -275,6 +275,16 @@
 
 			/*
 			 * Adjust bInterval for quirked devices.
+			 */
+			/*
+			 * This quirk fixes bIntervals reported in ms.
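+			 * For example, on a high-speed interrupt endpoint a
+			 * reported bInterval of 8 (ms) becomes
+			 * fls(8) + 3 = 7, i.e. 2^(7-1) = 64 microframes,
+			 * which is 8 ms again.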
+			 */
+			if (to_usb_device(ddev)->quirks &
+				USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+				n = clamp(fls(d->bInterval) + 3, i, j);
+				i = j = n;
+			}
+			/*
 			 * This quirk fixes bIntervals reported in
 			 * linear microframes.
 			 */
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2c4bd54..53d730e 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2540,6 +2540,7 @@
 	}
 	spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
 	/* Make sure that the other roothub is also deallocated. */
+	usb_atomic_notify_dead_bus(&hcd->self);
 }
 EXPORT_SYMBOL_GPL (usb_hc_died);
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index ffa53d8..c3d249f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4279,7 +4279,7 @@
 	struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
 	int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
 
-	if (!udev->usb2_hw_lpm_capable)
+	if (!udev->usb2_hw_lpm_capable || !udev->bos)
 		return;
 
 	if (hub)
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91..af91b1e 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -17,6 +17,7 @@
 #include "usb.h"
 
 static BLOCKING_NOTIFIER_HEAD(usb_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(usb_atomic_notifier_list);
 
 /**
  * usb_register_notify - register a notifier callback whenever a usb change happens
@@ -67,3 +68,33 @@
 {
 	blocking_notifier_call_chain(&usb_notifier_list, USB_BUS_REMOVE, ubus);
 }
+
+/**
+ * usb_register_atomic_notify - register an atomic notifier callback invoked
+ * whenever an HC dies
+ * @nb: pointer to the atomic notifier block for the callback events.
+ *
+ */
+void usb_register_atomic_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_register_atomic_notify);
+
+/**
+ * usb_unregister_atomic_notify - unregister an atomic notifier callback
+ * @nb: pointer to the notifier block for the callback events.
+ *
+ */
+void usb_unregister_atomic_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&usb_atomic_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(usb_unregister_atomic_notify);
+
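+/**
+ * usb_atomic_notify_dead_bus - notify listeners on the atomic chain that a
+ * bus (host controller) has died
+ * @ubus: pointer to the bus that died
+ */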
+void usb_atomic_notify_dead_bus(struct usb_bus *ubus)
+{
+	atomic_notifier_call_chain(&usb_atomic_notifier_list, USB_BUS_DIED,
+					 ubus);
+}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 24f9f98..96b21b0 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -170,6 +170,14 @@
 	/* M-Systems Flash Disk Pioneers */
 	{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Baum Vario Ultra */
+	{ USB_DEVICE(0x0904, 0x6101), .driver_info =
+			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+	{ USB_DEVICE(0x0904, 0x6102), .driver_info =
+			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+	{ USB_DEVICE(0x0904, 0x6103), .driver_info =
+			USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
 	/* Keytouch QWERTY Panel keyboard */
 	{ USB_DEVICE(0x0926, 0x3333), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 5331812..fbff25f 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -175,6 +175,7 @@
 extern void usb_notify_remove_device(struct usb_device *udev);
 extern void usb_notify_add_bus(struct usb_bus *ubus);
 extern void usb_notify_remove_bus(struct usb_bus *ubus);
+extern void usb_atomic_notify_dead_bus(struct usb_bus *ubus);
 extern void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
 		struct usb_hub_descriptor *desc);
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 8c52a13..94f65e4 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -323,7 +323,7 @@
 static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
 		struct dwc3_event_buffer *evt)
 {
-	dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
+	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
 }
 
 /**
@@ -345,7 +345,7 @@
 
 	evt->dwc	= dwc;
 	evt->length	= length;
-	evt->buf	= dma_alloc_coherent(dwc->dev, length,
+	evt->buf	= dma_alloc_coherent(dwc->sysdev, length,
 			&evt->dma, GFP_KERNEL);
 	if (!evt->buf)
 		return ERR_PTR(-ENOMEM);
@@ -474,11 +474,11 @@
 	if (!WARN_ON(dwc->scratchbuf))
 		return 0;
 
-	scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
+	scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
 			dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
 			DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dwc->dev, scratch_addr)) {
-		dev_err(dwc->dev, "failed to map scratch buffer\n");
+	if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
+		dev_err(dwc->sysdev, "failed to map scratch buffer\n");
 		ret = -EFAULT;
 		goto err0;
 	}
@@ -502,7 +502,7 @@
 	return 0;
 
 err1:
-	dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
 			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
 
 err0:
@@ -521,7 +521,7 @@
 	if (!WARN_ON(dwc->scratchbuf))
 		return;
 
-	dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
 			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
 	kfree(dwc->scratchbuf);
 }
@@ -1214,6 +1214,13 @@
 
 	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
 
+	dwc->sysdev_is_parent = device_property_read_bool(dev,
+				"linux,sysdev_is_parent");
+	if (dwc->sysdev_is_parent)
+		dwc->sysdev = dwc->dev->parent;
+	else
+		dwc->sysdev = dwc->dev;
+
 	dwc->has_lpm_erratum = device_property_read_bool(dev,
 				"snps,has-lpm-erratum");
 	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
@@ -1288,12 +1295,6 @@
 
 	spin_lock_init(&dwc->lock);
 
-	if (!dev->dma_mask) {
-		dev->dma_mask = dev->parent->dma_mask;
-		dev->dma_parms = dev->parent->dma_parms;
-		dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
-	}
-
 	pm_runtime_no_callbacks(dev);
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 7ded2b2..7968901 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -887,6 +887,7 @@
  * @ep0_bounced: true when we used bounce buffer
  * @ep0_expect_in: true when we expect a DATA IN transfer
  * @has_hibernation: true when dwc3 was configured with Hibernation
+ * @sysdev_is_parent: true when DMA operations use the parent of the dwc3 device
  * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
  *			there's now way for software to detect this in runtime.
  * @is_utmi_l1_suspend: the core asserts output signal
@@ -956,6 +957,7 @@
 	spinlock_t		lock;
 
 	struct device		*dev;
+	struct device		*sysdev;
 
 	struct platform_device	*xhci;
 	struct resource		xhci_resources[DWC3_XHCI_RESOURCES_NUM];
@@ -1062,6 +1064,7 @@
 	unsigned		ep0_bounced:1;
 	unsigned		ep0_expect_in:1;
 	unsigned		has_hibernation:1;
+	unsigned		sysdev_is_parent:1;
 	unsigned		has_lpm_erratum:1;
 	unsigned		is_utmi_l1_suspend:1;
 	unsigned		is_fpga:1;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index f456f12..8d2b429 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -21,6 +21,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/ratelimit.h>
 #include <linux/interrupt.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
 #include <linux/ioport.h>
 #include <linux/clk.h>
 #include <linux/io.h>
@@ -155,6 +157,7 @@
 	void __iomem *base;
 	void __iomem *ahb2phy_base;
 	struct platform_device	*dwc3;
+	struct dma_iommu_mapping *iommu_map;
 	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
 	struct list_head req_complete_list;
 	struct clk		*xo_clk;
@@ -1004,7 +1007,7 @@
 	int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
 					: (req->num_bufs + 1);
 
-	dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
+	dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev,
 					num_trbs * sizeof(struct dwc3_trb),
 					num_trbs * sizeof(struct dwc3_trb), 0);
 	if (!dep->trb_dma_pool) {
@@ -2091,6 +2094,9 @@
 		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
 		dwc3_msm_config_gdsc(mdwc, 0);
 		clk_disable_unprepare(mdwc->sleep_clk);
+
+		if (mdwc->iommu_map)
+			arm_iommu_detach_device(mdwc->dev);
 	}
 
 	/* Remove bus voting */
@@ -2236,6 +2242,16 @@
 					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
 
 		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
+
+		if (mdwc->iommu_map) {
+			ret = arm_iommu_attach_device(mdwc->dev,
+					mdwc->iommu_map);
+			if (ret)
+				dev_err(mdwc->dev, "IOMMU attach failed (%d)\n",
+						ret);
+			else
+				dev_dbg(mdwc->dev, "attached to IOMMU\n");
+		}
 	}
 
 	atomic_set(&dwc->in_lpm, 0);
@@ -2766,6 +2782,41 @@
 	return ret;
 }
 
+#define SMMU_BASE	0x10000000 /* Device address range base */
+#define SMMU_SIZE	0x40000000 /* Device address range size */
+
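+/*
+ * When the "iommus" DT property is present, create an ARM IOMMU mapping
+ * covering [SMMU_BASE, SMMU_BASE + SMMU_SIZE) and mark the domain atomic
+ * so that map/unmap may be called from atomic context.
+ */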
+static int dwc3_msm_init_iommu(struct dwc3_msm *mdwc)
+{
+	struct device_node *node = mdwc->dev->of_node;
+	int atomic_ctx = 1;
+	int ret;
+
+	if (!of_property_read_bool(node, "iommus"))
+		return 0;
+
+	mdwc->iommu_map = arm_iommu_create_mapping(&platform_bus_type,
+			SMMU_BASE, SMMU_SIZE);
+	if (IS_ERR_OR_NULL(mdwc->iommu_map)) {
+		ret = PTR_ERR(mdwc->iommu_map) ?: -ENODEV;
+		dev_err(mdwc->dev, "Failed to create IOMMU mapping (%d)\n",
+				ret);
+		return ret;
+	}
+	dev_dbg(mdwc->dev, "IOMMU mapping created: %pK\n", mdwc->iommu_map);
+
+	ret = iommu_domain_set_attr(mdwc->iommu_map->domain, DOMAIN_ATTR_ATOMIC,
+			&atomic_ctx);
+	if (ret) {
+		dev_err(mdwc->dev, "IOMMU set atomic attribute failed (%d)\n",
+			ret);
+		arm_iommu_release_mapping(mdwc->iommu_map);
+		mdwc->iommu_map = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+
 static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {
@@ -3070,12 +3121,16 @@
 
 	dwc3_set_notifier(&dwc3_msm_notify_event);
 
+	ret = dwc3_msm_init_iommu(mdwc);
+	if (ret)
+		goto err;
+
 	/* Assumes dwc3 is the first DT child of dwc3-msm */
 	dwc3_node = of_get_next_available_child(node, NULL);
 	if (!dwc3_node) {
 		dev_err(&pdev->dev, "failed to find dwc3 child\n");
 		ret = -ENODEV;
-		goto err;
+		goto uninit_iommu;
 	}
 
 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
@@ -3083,7 +3138,7 @@
 		dev_err(&pdev->dev,
 				"failed to add create dwc3 core\n");
 		of_node_put(dwc3_node);
-		goto err;
+		goto uninit_iommu;
 	}
 
 	mdwc->dwc3 = of_find_device_by_node(dwc3_node);
@@ -3194,6 +3249,9 @@
 	platform_device_put(mdwc->dwc3);
 	if (mdwc->bus_perf_client)
 		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+uninit_iommu:
+	if (mdwc->iommu_map)
+		arm_iommu_release_mapping(mdwc->iommu_map);
 err:
 	return ret;
 }
@@ -3271,6 +3329,12 @@
 
 	dwc3_msm_config_gdsc(mdwc, 0);
 
+	if (mdwc->iommu_map) {
+		if (!atomic_read(&dwc->in_lpm))
+			arm_iommu_detach_device(mdwc->dev);
+		arm_iommu_release_mapping(mdwc->iommu_map);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c2b2938..bb32978 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1011,8 +1011,8 @@
 		u32	transfer_size = 0;
 		u32	maxpacket;
 
-		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-				dep->number);
+		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+				&req->request, dep->number);
 		if (ret) {
 			dwc3_trace(trace_dwc3_ep0, "failed to map request");
 			return;
@@ -1040,8 +1040,8 @@
 				DWC3_TRBCTL_CONTROL_DATA, false);
 		ret = dwc3_ep0_start_trans(dwc, dep->number);
 	} else {
-		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-				dep->number);
+		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+				&req->request, dep->number);
 		if (ret) {
 			dwc3_trace(trace_dwc3_ep0, "failed to map request");
 			return;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index a6014fa..264d9af 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -295,8 +295,8 @@
 	if (dwc->ep0_bounced && dep->number <= 1)
 		dwc->ep0_bounced = false;
 
-	usb_gadget_unmap_request(&dwc->gadget, &req->request,
-			req->direction);
+	usb_gadget_unmap_request_by_dev(dwc->sysdev,
+			&req->request, req->direction);
 
 	trace_dwc3_gadget_giveback(req);
 
@@ -476,7 +476,7 @@
 	if (dep->trb_pool)
 		return 0;
 
-	dep->trb_pool = dma_zalloc_coherent(dwc->dev,
+	dep->trb_pool = dma_zalloc_coherent(dwc->sysdev,
 			sizeof(struct dwc3_trb) * num_trbs,
 			&dep->trb_pool_dma, GFP_KERNEL);
 	if (!dep->trb_pool) {
@@ -508,7 +508,7 @@
 		dbg_event(dep->number, "Clr_TRB", 0);
 		dev_dbg(dwc->dev, "Clr_TRB ring of %s\n", dep->name);
 
-		dma_free_coherent(dwc->dev,
+		dma_free_coherent(dwc->sysdev,
 				sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
 				dep->trb_pool, dep->trb_pool_dma);
 		dep->trb_pool = NULL;
@@ -1150,8 +1150,8 @@
 		 * here and stop, unmap, free and del each of the linked
 		 * requests instead of what we do now.
 		 */
-		usb_gadget_unmap_request(&dwc->gadget, &req->request,
-				req->direction);
+		usb_gadget_unmap_request_by_dev(dwc->sysdev,
+				&req->request, req->direction);
 		list_del(&req->list);
 		return ret;
 	}
@@ -1233,8 +1233,8 @@
 
 	trace_dwc3_ep_queue(req);
 
-	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-			dep->direction);
+	ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
+					    dep->direction);
 	if (ret)
 		return ret;
 
@@ -3615,7 +3615,7 @@
 
 	INIT_WORK(&dwc->wakeup_work, dwc3_gadget_wakeup_work);
 
-	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			&dwc->ctrl_req_addr, GFP_KERNEL);
 	if (!dwc->ctrl_req) {
 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
@@ -3623,8 +3623,9 @@
 		goto err0;
 	}
 
-	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
-			&dwc->ep0_trb_addr, GFP_KERNEL);
+	dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
+					  sizeof(*dwc->ep0_trb) * 2,
+					  &dwc->ep0_trb_addr, GFP_KERNEL);
 	if (!dwc->ep0_trb) {
 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
 		ret = -ENOMEM;
@@ -3637,7 +3638,7 @@
 		goto err2;
 	}
 
-	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
+	dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev,
 			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
 			GFP_KERNEL);
 	if (!dwc->ep0_bounce) {
@@ -3716,18 +3717,18 @@
 
 err4:
 	dwc3_gadget_free_endpoints(dwc);
-	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+	dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
 err3:
 	kfree(dwc->setup_buf);
 
 err2:
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
 			dwc->ep0_trb, dwc->ep0_trb_addr);
 
 err1:
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			dwc->ctrl_req, dwc->ctrl_req_addr);
 
 err0:
@@ -3747,16 +3748,16 @@
 
 	dwc3_gadget_free_endpoints(dwc);
 
-	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+	dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
 	kfree(dwc->setup_buf);
 	kfree(dwc->zlp_buf);
 
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
 			dwc->ep0_trb, dwc->ep0_trb_addr);
 
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			dwc->ctrl_req, dwc->ctrl_req_addr);
 }
 
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 800bcae..e52bf45 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -72,12 +72,7 @@
 		return -ENOMEM;
 	}
 
-	arch_setup_dma_ops(&xhci->dev, 0, 0, NULL, 0);
-	dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
-
 	xhci->dev.parent	= dwc->dev;
-	xhci->dev.dma_mask	= dwc->dev->dma_mask;
-	xhci->dev.dma_parms	= dwc->dev->dma_parms;
 
 	dwc->xhci = xhci;
 
@@ -100,9 +95,9 @@
 	}
 
 	phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&xhci->dev));
+			  dev_name(dwc->dev));
 	phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&xhci->dev));
+			  dev_name(dwc->dev));
 
 	/* Platform device gets added as part of state machine */
 
@@ -115,9 +110,9 @@
 void dwc3_host_exit(struct dwc3 *dwc)
 {
 	phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&dwc->xhci->dev));
+			  dev_name(dwc->dev));
 	phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&dwc->xhci->dev));
+			  dev_name(dwc->dev));
 	if (!dwc->is_drd)
 		platform_device_unregister(dwc->xhci);
 }
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index a30766c..5e3828d 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -535,13 +535,15 @@
 {
 	struct usb_composite_dev *cdev = acm->port.func.config->cdev;
 	int			status;
+	__le16			serial_state;
 
 	spin_lock(&acm->lock);
 	if (acm->notify_req) {
 		dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
 			acm->port_num, acm->serial_state);
+		serial_state = cpu_to_le16(acm->serial_state);
 		status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
-				0, &acm->serial_state, sizeof(acm->serial_state));
+				0, &serial_state, sizeof(acm->serial_state));
 	} else {
 		acm->pending = true;
 		status = 0;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index a832d27..fbe6910 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1284,7 +1284,7 @@
 	card = midi->card;
 	midi->card = NULL;
 	if (card)
-		snd_card_free(card);
+		snd_card_free_when_closed(card);
 
 	usb_free_all_descriptors(f);
 }
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 4d8694a..c6aa884 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -1425,6 +1425,7 @@
 	struct usb_request *req;
 	int i;
 
+	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
 	while ((req = mtp_req_get(dev, &dev->tx_idle)))
 		mtp_request_free(req, dev->ep_in);
 	for (i = 0; i < RX_REQ_MAX; i++)
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5..c7689d0 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -625,7 +625,7 @@
 	uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
 	uvc_ss_streaming_comp.wBytesPerInterval =
 		cpu_to_le16(max_packet_size * max_packet_mult *
-			    opts->streaming_maxburst);
+			    (opts->streaming_maxburst + 1));
 
 	/* Allocate endpoints. */
 	ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index f33f972..f1fe81c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -136,6 +136,59 @@
 MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
 #endif
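+/*
+ * Sysfs attribute to read and update the xHCI interrupt moderation
+ * interval (the ER_IRQ_INTERVAL_MASK field of the primary interrupter's
+ * IRQ control register) while the controller is not suspended.
+ */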
 
+static ssize_t config_imod_store(struct device *pdev,
+		struct device_attribute *attr, const char *buff, size_t size)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(pdev);
+	struct xhci_hcd *xhci;
+	u32 temp;
+	u32 imod;
+	unsigned long flags;
+
+	if (kstrtouint(buff, 10, &imod))
+		return -EINVAL;
+
+	imod &= ER_IRQ_INTERVAL_MASK;
+	xhci = hcd_to_xhci(hcd);
+
+	if (xhci->shared_hcd->state == HC_STATE_SUSPENDED
+		&& hcd->state == HC_STATE_SUSPENDED)
+		return -EACCES;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = readl_relaxed(&xhci->ir_set->irq_control);
+	temp &= ~ER_IRQ_INTERVAL_MASK;
+	temp |= imod;
+	writel_relaxed(temp, &xhci->ir_set->irq_control);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return size;
+}
+
+static ssize_t config_imod_show(struct device *pdev,
+		struct device_attribute *attr, char *buff)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(pdev);
+	struct xhci_hcd *xhci;
+	u32 temp;
+	unsigned long flags;
+
+	xhci = hcd_to_xhci(hcd);
+
+	if (xhci->shared_hcd->state == HC_STATE_SUSPENDED
+		&& hcd->state == HC_STATE_SUSPENDED)
+		return -EACCES;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	temp = readl_relaxed(&xhci->ir_set->irq_control) &
+			ER_IRQ_INTERVAL_MASK;
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	return snprintf(buff, PAGE_SIZE, "%08u\n", temp);
+}
+
+static DEVICE_ATTR(config_imod, 0644, config_imod_show, config_imod_store);
+
 static int xhci_plat_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *match;
@@ -278,6 +331,11 @@
 	if (ret)
 		goto dealloc_usb2_hcd;
 
+	ret = device_create_file(&pdev->dev, &dev_attr_config_imod);
+	if (ret)
+		dev_err(&pdev->dev, "%s: unable to create imod sysfs entry\n",
+					__func__);
+
 	pm_runtime_mark_last_busy(&pdev->dev);
 	pm_runtime_put_autosuspend(&pdev->dev);
 
@@ -312,6 +370,7 @@
 	pm_runtime_disable(&dev->dev);
 	xhci->xhc_state |= XHCI_STATE_REMOVING;
 
+	device_remove_file(&dev->dev, &dev_attr_config_imod);
 	usb_remove_hcd(xhci->shared_hcd);
 	usb_phy_shutdown(hcd->usb_phy);
 
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 2975e80..9a67ae3 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -346,6 +346,9 @@
 	if (iface_desc->desc.bInterfaceClass != 0x0A)
 		return -ENODEV;
 
+	if (iface_desc->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL)
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 7717651..d3d1247 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -366,6 +366,10 @@
 
 	hdev = interface_to_usbdev(intf);
 	desc = intf->cur_altsetting;
+
+	if (desc->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	endpoint = &desc->endpoint[0].desc;
 
 	/* valid only for SS root hub */
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 356d312..9ff6652 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -708,6 +708,11 @@
 
 	interface = intf->cur_altsetting;
 
+	if (interface->desc.bNumEndpoints < 3) {
+		usb_put_dev(usbdev);
+		return -ENODEV;
+	}
+
 	/*
 	 * Allocate parport interface 
 	 */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 338575f..358feca 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2467,8 +2467,8 @@
 	pm_runtime_get_sync(musb->controller);
 	musb_host_cleanup(musb);
 	musb_gadget_cleanup(musb);
-	spin_lock_irqsave(&musb->lock, flags);
 	musb_platform_disable(musb);
+	spin_lock_irqsave(&musb->lock, flags);
 	musb_generic_disable(musb);
 	spin_unlock_irqrestore(&musb->lock, flags);
 	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d4d7c56..cb443df 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -232,8 +232,27 @@
 			transferred < cppi41_channel->packet_sz)
 		cppi41_channel->prog_len = 0;
 
-	if (cppi41_channel->is_tx)
-		empty = musb_is_tx_fifo_empty(hw_ep);
+	if (cppi41_channel->is_tx) {
+		u8 type;
+
+		if (is_host_active(musb))
+			type = hw_ep->out_qh->type;
+		else
+			type = hw_ep->ep_in.type;
+
+		if (type == USB_ENDPOINT_XFER_ISOC)
+			/*
+			 * Don't use the early-TX-interrupt workaround below
+			 * for Isoch transfers. Since Isoch transfers are
+			 * periodic, by the time the next transfer is
+			 * scheduled, the current one should be done already.
+			 *
+			 * This avoids audio playback underrun issue.
+			 */
+			empty = true;
+		else
+			empty = musb_is_tx_fifo_empty(hw_ep);
+	}
 
 	if (!cppi41_channel->is_tx || empty) {
 		cppi41_trans_done(cppi41_channel);
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index bf155ae9..c59e33f 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -33,12 +33,8 @@
 #define QUSB2PHY_PLL_COMMON_STATUS_ONE	0x1A0
 #define CORE_READY_STATUS		BIT(0)
 
-/* In case Efuse register shows zero, use this value */
-#define TUNE2_DEFAULT_HIGH_NIBBLE	0xB
-#define TUNE2_DEFAULT_LOW_NIBBLE	0x3
-
-/* Get TUNE2's high nibble value read from efuse */
-#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	((val >> pos) & mask)
+/* Get TUNE value from efuse bit-mask */
+#define TUNE_VAL_MASK(val, pos, mask)	((val >> pos) & mask)
 
 #define QUSB2PHY_INTR_CTRL		0x22C
 #define DMSE_INTR_HIGH_SEL              BIT(4)
@@ -51,7 +47,8 @@
 #define DMSE_INTERRUPT			BIT(1)
 #define DPSE_INTERRUPT			BIT(0)
 
-#define QUSB2PHY_PORT_TUNE2		0x240
+#define QUSB2PHY_PORT_TUNE1		0x23c
+#define QUSB2PHY_TEST1			0x24C
 
 #define QUSB2PHY_1P8_VOL_MIN           1800000 /* uV */
 #define QUSB2PHY_1P8_VOL_MAX           1800000 /* uV */
@@ -64,14 +61,14 @@
 #define LINESTATE_DP			BIT(0)
 #define LINESTATE_DM			BIT(1)
 
-unsigned int phy_tune2;
-module_param(phy_tune2, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(phy_tune2, "QUSB PHY v2 TUNE2");
+unsigned int phy_tune1;
+module_param(phy_tune1, uint, 0644);
+MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
 
 struct qusb_phy {
 	struct usb_phy		phy;
 	void __iomem		*base;
-	void __iomem		*tune2_efuse_reg;
+	void __iomem		*efuse_reg;
 
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
@@ -87,9 +84,9 @@
 	int			host_init_seq_len;
 	int			*qusb_phy_host_init_seq;
 
-	u32			tune2_val;
-	int			tune2_efuse_bit_pos;
-	int			tune2_efuse_num_of_bits;
+	u32			tune_val;
+	int			efuse_bit_pos;
+	int			efuse_num_of_bits;
 
 	bool			power_enabled;
 	bool			clocks_enabled;
@@ -288,40 +285,35 @@
 	return ret;
 }
 
-static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+static void qusb_phy_get_tune1_param(struct qusb_phy *qphy)
 {
-	u8 num_of_bits;
+	u8 reg;
 	u32 bit_mask = 1;
 
 	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
-				qphy->tune2_efuse_num_of_bits,
-				qphy->tune2_efuse_bit_pos);
+				qphy->efuse_num_of_bits,
+				qphy->efuse_bit_pos);
 
 	/* get bit mask based on number of bits to use with efuse reg */
-	if (qphy->tune2_efuse_num_of_bits) {
-		num_of_bits = qphy->tune2_efuse_num_of_bits;
-		bit_mask = (bit_mask << num_of_bits) - 1;
-	}
+	bit_mask = (bit_mask << qphy->efuse_num_of_bits) - 1;
 
 	/*
-	 * Read EFUSE register having TUNE2 parameter's high nibble.
-	 * If efuse register shows value as 0x0, then use default value
-	 * as 0xB as high nibble. Otherwise use efuse register based
-	 * value for this purpose.
+	 * if efuse reg is updated (i.e. non-zero) then use it to program
+	 * tune parameters
 	 */
-	qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
-	pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
-				__func__, bit_mask, qphy->tune2_val);
+	qphy->tune_val = readl_relaxed(qphy->efuse_reg);
+	pr_debug("%s(): bit_mask:%d efuse based tune1 value:%d\n",
+				__func__, bit_mask, qphy->tune_val);
 
-	qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
-				qphy->tune2_efuse_bit_pos, bit_mask);
+	qphy->tune_val = TUNE_VAL_MASK(qphy->tune_val,
+				qphy->efuse_bit_pos, bit_mask);
+	reg = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE1);
+	if (qphy->tune_val) {
+		reg = reg & 0x0f;
+		reg |= (qphy->tune_val << 4);
+	}
 
-	if (!qphy->tune2_val)
-		qphy->tune2_val = TUNE2_DEFAULT_HIGH_NIBBLE;
-
-	/* Get TUNE2 byte value using high and low nibble value */
-	qphy->tune2_val = ((qphy->tune2_val << 0x4) |
-					TUNE2_DEFAULT_LOW_NIBBLE);
+	qphy->tune_val = reg;
 }
 
 static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
@@ -427,27 +419,22 @@
 	if (qphy->qusb_phy_init_seq)
 		qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
 				qphy->init_seq_len, 0);
-	/*
-	 * Check for EFUSE value only if tune2_efuse_reg is available
-	 * and try to read EFUSE value only once i.e. not every USB
-	 * cable connect case.
-	 */
-	if (qphy->tune2_efuse_reg) {
-		if (!qphy->tune2_val)
-			qusb_phy_get_tune2_param(qphy);
+	if (qphy->efuse_reg) {
+		if (!qphy->tune_val)
+			qusb_phy_get_tune1_param(qphy);
 
-		pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
-				qphy->tune2_val);
-		writel_relaxed(qphy->tune2_val,
-				qphy->base + QUSB2PHY_PORT_TUNE2);
+		pr_debug("%s(): Programming TUNE1 parameter as:%x\n", __func__,
+				qphy->tune_val);
+		writel_relaxed(qphy->tune_val,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
 	}
 
-	/* If phy_tune2 modparam set, override tune2 value */
-	if (phy_tune2) {
-		pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
-						__func__, phy_tune2);
-		writel_relaxed(phy_tune2,
-				qphy->base + QUSB2PHY_PORT_TUNE2);
+	/* If phy_tune1 modparam set, override tune1 value */
+	if (phy_tune1) {
+		pr_debug("%s(): (modparam) TUNE1 val:0x%02x\n",
+						__func__, phy_tune1);
+		writel_relaxed(phy_tune1,
+				qphy->base + QUSB2PHY_PORT_TUNE1);
 	}
 
 	/* ensure above writes are completed before re-enabling PHY */
@@ -550,6 +537,14 @@
 			writel_relaxed(intr_mask,
 				qphy->base + QUSB2PHY_INTR_CTRL);
 
+			/* enable phy auto-resume */
+			writel_relaxed(0x91,
+					qphy->base + QUSB2PHY_TEST1);
+			/* flush the previous write before next write */
+			wmb();
+			writel_relaxed(0x90,
+				qphy->base + QUSB2PHY_TEST1);
+
 			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
 			__func__, intr_mask);
 
@@ -730,23 +725,23 @@
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-							"tune2_efuse_addr");
+							"efuse_addr");
 	if (res) {
-		qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+		qphy->efuse_reg = devm_ioremap_nocache(dev, res->start,
 							resource_size(res));
-		if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+		if (!IS_ERR_OR_NULL(qphy->efuse_reg)) {
 			ret = of_property_read_u32(dev->of_node,
-					"qcom,tune2-efuse-bit-pos",
-					&qphy->tune2_efuse_bit_pos);
+					"qcom,efuse-bit-pos",
+					&qphy->efuse_bit_pos);
 			if (!ret) {
 				ret = of_property_read_u32(dev->of_node,
-						"qcom,tune2-efuse-num-bits",
-						&qphy->tune2_efuse_num_of_bits);
+						"qcom,efuse-num-bits",
+						&qphy->efuse_num_of_bits);
 			}
 
 			if (ret) {
 				dev_err(dev,
-				"DT Value for tune2 efuse is invalid.\n");
+				"DT Value for efuse is invalid.\n");
 				return -EINVAL;
 			}
 		}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42cc72e..af67a0d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,14 @@
 #define BANDRICH_PRODUCT_1012			0x1012
 
 #define QUALCOMM_VENDOR_ID			0x05C6
+/* These Quectel products use Qualcomm's vendor ID */
+#define QUECTEL_PRODUCT_UC20			0x9003
+#define QUECTEL_PRODUCT_UC15			0x9090
+
+#define QUECTEL_VENDOR_ID			0x2c7c
+/* These Quectel products use Quectel's vendor ID */
+#define QUECTEL_PRODUCT_EC21			0x0121
+#define QUECTEL_PRODUCT_EC25			0x0125
 
 #define CMOTECH_VENDOR_ID			0x16d8
 #define CMOTECH_PRODUCT_6001			0x6001
@@ -1161,7 +1169,14 @@
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
-	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+	/* Quectel products using Qualcomm vendor ID */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	/* Quectel products using Quectel vendor ID */
+	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 696458d..38b3f0d 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -169,6 +169,8 @@
 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81b3)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+	{DEVICE_SWI(0x413c, 0x81b5)},	/* Dell Wireless 5811e QDL */
+	{DEVICE_SWI(0x413c, 0x81b6)},	/* Dell Wireless 5811e QDL */
 
 	/* Huawei devices */
 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 252c7bd..d01496f 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -39,6 +39,9 @@
 	int result;
 	struct device *dev = &iface->dev;
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+		return -ENODEV;
+
 	result = wa_rpipes_create(wa);
 	if (result < 0)
 		goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0aa6c3c..35a1e77 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -823,6 +823,9 @@
 	struct hwarc *hwarc;
 	struct device *dev = &iface->dev;
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	result = -ENOMEM;
 	uwb_rc = uwb_rc_alloc();
 	if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 2bfc846..6345e85 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -362,6 +362,9 @@
 				 result);
 	}
 
+	if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	result = -ENOMEM;
 	i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
 	if (i1480_usb == NULL) {
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index b87f5cf..4db10d7 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1167,6 +1167,8 @@
 	p->userfont = 0;
 }
 
+static void set_vc_hi_font(struct vc_data *vc, bool set);
+
 static void fbcon_deinit(struct vc_data *vc)
 {
 	struct display *p = &fb_display[vc->vc_num];
@@ -1202,6 +1204,9 @@
 	if (free_font)
 		vc->vc_font.data = NULL;
 
+	if (vc->vc_hi_font_mask)
+		set_vc_hi_font(vc, false);
+
 	if (!con_is_bound(&fb_con))
 		fbcon_exit();
 
@@ -2438,32 +2443,10 @@
 	return 0;
 }
 
-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
-			     const u8 * data, int userfont)
+/* set/clear vc_hi_font_mask and update vc attrs accordingly */
+static void set_vc_hi_font(struct vc_data *vc, bool set)
 {
-	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
-	struct fbcon_ops *ops = info->fbcon_par;
-	struct display *p = &fb_display[vc->vc_num];
-	int resize;
-	int cnt;
-	char *old_data = NULL;
-
-	if (con_is_visible(vc) && softback_lines)
-		fbcon_set_origin(vc);
-
-	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
-	if (p->userfont)
-		old_data = vc->vc_font.data;
-	if (userfont)
-		cnt = FNTCHARCNT(data);
-	else
-		cnt = 256;
-	vc->vc_font.data = (void *)(p->fontdata = data);
-	if ((p->userfont = userfont))
-		REFCOUNT(data)++;
-	vc->vc_font.width = w;
-	vc->vc_font.height = h;
-	if (vc->vc_hi_font_mask && cnt == 256) {
+	if (!set) {
 		vc->vc_hi_font_mask = 0;
 		if (vc->vc_can_do_color) {
 			vc->vc_complement_mask >>= 1;
@@ -2486,7 +2469,7 @@
 			    ((c & 0xfe00) >> 1) | (c & 0xff);
 			vc->vc_attr >>= 1;
 		}
-	} else if (!vc->vc_hi_font_mask && cnt == 512) {
+	} else {
 		vc->vc_hi_font_mask = 0x100;
 		if (vc->vc_can_do_color) {
 			vc->vc_complement_mask <<= 1;
@@ -2518,8 +2501,38 @@
 			} else
 				vc->vc_video_erase_char = c & ~0x100;
 		}
-
 	}
+}
+
+static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+			     const u8 * data, int userfont)
+{
+	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+	struct fbcon_ops *ops = info->fbcon_par;
+	struct display *p = &fb_display[vc->vc_num];
+	int resize;
+	int cnt;
+	char *old_data = NULL;
+
+	if (con_is_visible(vc) && softback_lines)
+		fbcon_set_origin(vc);
+
+	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+	if (p->userfont)
+		old_data = vc->vc_font.data;
+	if (userfont)
+		cnt = FNTCHARCNT(data);
+	else
+		cnt = 256;
+	vc->vc_font.data = (void *)(p->fontdata = data);
+	if ((p->userfont = userfont))
+		REFCOUNT(data)++;
+	vc->vc_font.width = w;
+	vc->vc_font.height = h;
+	if (vc->vc_hi_font_mask && cnt == 256)
+		set_vc_hi_font(vc, false);
+	else if (!vc->vc_hi_font_mask && cnt == 512)
+		set_vc_hi_font(vc, true);
 
 	if (resize) {
 		int cols, rows;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9d2738e..2c2e679 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -427,6 +427,8 @@
 		 * Prime this virtqueue with one buffer so the hypervisor can
 		 * use it to signal us later (it can't be broken yet!).
 		 */
+		update_balloon_stats(vb);
+
 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
 		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
 		    < 0)
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 4ce10bc..4b85746 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/syscore_ops.h>
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 #include <xen/xen.h>
-#include <xen/xen-ops.h>
 #include <xen/interface/platform.h>
 #include <asm/xen/hypercall.h>
 
@@ -466,15 +466,33 @@
 	return rc;
 }
 
-static int xen_acpi_processor_resume(struct notifier_block *nb,
-				     unsigned long action, void *data)
+static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
 {
+	int rc;
+
 	bitmap_zero(acpi_ids_done, nr_acpi_bits);
-	return xen_upload_processor_pm_data();
+
+	rc = xen_upload_processor_pm_data();
+	if (rc != 0)
+		pr_info("ACPI data upload failed, error = %d\n", rc);
 }
 
-struct notifier_block xen_acpi_processor_resume_nb = {
-	.notifier_call = xen_acpi_processor_resume,
+static void xen_acpi_processor_resume(void)
+{
+	static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
+
+	/*
+	 * xen_upload_processor_pm_data() calls non-atomic code.
+	 * However, the context for xen_acpi_processor_resume is syscore
+	 * with only the boot CPU online and in an atomic context.
+	 *
+	 * So defer the upload to a safer point.
+	 */
+	schedule_work(&wq);
+}
+
+static struct syscore_ops xap_syscore_ops = {
+	.resume	= xen_acpi_processor_resume,
 };
 
 static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@
 	if (rc)
 		goto err_unregister;
 
-	xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
+	register_syscore_ops(&xap_syscore_ops);
 
 	return 0;
 err_unregister:
@@ -544,7 +562,7 @@
 {
 	int i;
 
-	xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
+	unregister_syscore_ops(&xap_syscore_ops);
 	kfree(acpi_ids_done);
 	kfree(acpi_id_present);
 	kfree(acpi_id_cst_present);
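
The xen-acpi-processor hunks above replace a Xen resume notifier with a syscore_ops hook; since syscore ->resume runs with interrupts disabled and only the boot CPU online, the non-atomic PM-data upload is pushed to a workqueue. A generic sketch of that pattern, with hypothetical names but the same kernel APIs (struct syscore_ops, register_syscore_ops, DECLARE_WORK, schedule_work):

#include <linux/syscore_ops.h>
#include <linux/workqueue.h>

static void example_resume_worker(struct work_struct *work)
{
	/* Process context: taking mutexes or sleeping is fine here. */
}

static DECLARE_WORK(example_resume_work, example_resume_worker);

static void example_syscore_resume(void)
{
	/* Atomic context, boot CPU only: defer anything that may sleep. */
	schedule_work(&example_resume_work);
}

static struct syscore_ops example_syscore_ops = {
	.resume = example_syscore_resume,
};

/* init path:  register_syscore_ops(&example_syscore_ops);   */
/* exit path:  unregister_syscore_ops(&example_syscore_ops); */
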
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 98f87fe..61cfcce 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -352,7 +352,6 @@
 static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
 	struct dentry *dir;
-	struct fscrypt_info *ci;
 	int dir_has_key, cached_with_key;
 
 	if (flags & LOOKUP_RCU)
@@ -364,18 +363,11 @@
 		return 0;
 	}
 
-	ci = d_inode(dir)->i_crypt_info;
-	if (ci && ci->ci_keyring_key &&
-	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-					  (1 << KEY_FLAG_REVOKED) |
-					  (1 << KEY_FLAG_DEAD))))
-		ci = NULL;
-
 	/* this should eventually be an flag in d_flags */
 	spin_lock(&dentry->d_lock);
 	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
 	spin_unlock(&dentry->d_lock);
-	dir_has_key = (ci != NULL);
+	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
 	dput(dir);
 
 	/*
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 9b774f4..80bb956 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -350,7 +350,7 @@
 		fname->disk_name.len = iname->len;
 		return 0;
 	}
-	ret = get_crypt_info(dir);
+	ret = fscrypt_get_encryption_info(dir);
 	if (ret && ret != -EOPNOTSUPP)
 		return ret;
 
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 67fb6d8..bb46063 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -99,6 +99,7 @@
 	kfree(full_key_descriptor);
 	if (IS_ERR(keyring_key))
 		return PTR_ERR(keyring_key);
+	down_read(&keyring_key->sem);
 
 	if (keyring_key->type != &key_type_logon) {
 		printk_once(KERN_WARNING
@@ -106,11 +107,9 @@
 		res = -ENOKEY;
 		goto out;
 	}
-	down_read(&keyring_key->sem);
 	ukp = user_key_payload(keyring_key);
 	if (ukp->datalen != sizeof(struct fscrypt_key)) {
 		res = -EINVAL;
-		up_read(&keyring_key->sem);
 		goto out;
 	}
 	master_key = (struct fscrypt_key *)ukp->data;
@@ -121,17 +120,11 @@
 				"%s: key size incorrect: %d\n",
 				__func__, master_key->size);
 		res = -ENOKEY;
-		up_read(&keyring_key->sem);
 		goto out;
 	}
 	res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
-	up_read(&keyring_key->sem);
-	if (res)
-		goto out;
-
-	crypt_info->ci_keyring_key = keyring_key;
-	return 0;
 out:
+	up_read(&keyring_key->sem);
 	key_put(keyring_key);
 	return res;
 }
@@ -173,12 +166,11 @@
 	if (!ci)
 		return;
 
-	key_put(ci->ci_keyring_key);
 	crypto_free_skcipher(ci->ci_ctfm);
 	kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
-int get_crypt_info(struct inode *inode)
+int fscrypt_get_encryption_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
 	struct fscrypt_context ctx;
@@ -188,21 +180,15 @@
 	u8 *raw_key = NULL;
 	int res;
 
+	if (inode->i_crypt_info)
+		return 0;
+
 	res = fscrypt_initialize();
 	if (res)
 		return res;
 
 	if (!inode->i_sb->s_cop->get_context)
 		return -EOPNOTSUPP;
-retry:
-	crypt_info = ACCESS_ONCE(inode->i_crypt_info);
-	if (crypt_info) {
-		if (!crypt_info->ci_keyring_key ||
-				key_validate(crypt_info->ci_keyring_key) == 0)
-			return 0;
-		fscrypt_put_encryption_info(inode, crypt_info);
-		goto retry;
-	}
 
 	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
 	if (res < 0) {
@@ -230,7 +216,6 @@
 	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
 	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
 	crypt_info->ci_ctfm = NULL;
-	crypt_info->ci_keyring_key = NULL;
 	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
 				sizeof(crypt_info->ci_master_key));
 
@@ -285,14 +270,8 @@
 	if (res)
 		goto out;
 
-	kzfree(raw_key);
-	raw_key = NULL;
-	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
-		put_crypt_info(crypt_info);
-		goto retry;
-	}
-	return 0;
-
+	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
+		crypt_info = NULL;
 out:
 	if (res == -ENOKEY)
 		res = 0;
@@ -300,6 +279,7 @@
 	kzfree(raw_key);
 	return res;
 }
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
 
 void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
 {
@@ -317,17 +297,3 @@
 	put_crypt_info(ci);
 }
 EXPORT_SYMBOL(fscrypt_put_encryption_info);
-
-int fscrypt_get_encryption_info(struct inode *inode)
-{
-	struct fscrypt_info *ci = inode->i_crypt_info;
-
-	if (!ci ||
-		(ci->ci_keyring_key &&
-		 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-					       (1 << KEY_FLAG_REVOKED) |
-					       (1 << KEY_FLAG_DEAD)))))
-		return get_crypt_info(inode);
-	return 0;
-}
-EXPORT_SYMBOL(fscrypt_get_encryption_info);
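
The keyinfo.c hunk above stops caching a long-lived keyring key reference and instead holds the key's semaphore across every access to the payload, releasing it on a single exit path. A sketch of that locking shape, assuming a hypothetical helper and the user_key_payload() accessor the hunk itself uses (payload validation is simplified here):

#include <linux/key.h>
#include <linux/string.h>
#include <keys/user-type.h>

static int example_read_logon_key(struct key *k, void *dst, size_t len)
{
	const struct user_key_payload *ukp;
	int res = 0;

	down_read(&k->sem);	/* pins the payload against revocation */
	ukp = user_key_payload(k);
	if (ukp->datalen < len) {
		res = -EINVAL;
		goto out;
	}
	memcpy(dst, ukp->data, len);
out:
	up_read(&k->sem);	/* one unlock on every path */
	return res;
}
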
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 004eebb..a5807fd 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1171,10 +1171,9 @@
 	set_buffer_uptodate(dir_block);
 	err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
 	if (err)
-		goto out;
+		return err;
 	set_buffer_verified(dir_block);
-out:
-	return err;
+	return ext4_mark_inode_dirty(handle, inode);
 }
 
 static int ext4_convert_inline_data_nolock(handle_t *handle,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index afe29ba4..5fa9ba1 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3830,7 +3830,7 @@
 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
 		   EXT4_DESC_PER_BLOCK(sb);
 	if (ext4_has_feature_meta_bg(sb)) {
-		if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+		if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
 			ext4_msg(sb, KERN_WARNING,
 				 "first meta block group too large: %u "
 				 "(group descriptor block count %u)",
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 4448ed3..3eeed8f 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -131,31 +131,26 @@
 }
 
 static int ext4_xattr_block_csum_verify(struct inode *inode,
-					sector_t block_nr,
-					struct ext4_xattr_header *hdr)
+					struct buffer_head *bh)
 {
-	if (ext4_has_metadata_csum(inode->i_sb) &&
-	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
-		return 0;
-	return 1;
+	struct ext4_xattr_header *hdr = BHDR(bh);
+	int ret = 1;
+
+	if (ext4_has_metadata_csum(inode->i_sb)) {
+		lock_buffer(bh);
+		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
+							bh->b_blocknr, hdr));
+		unlock_buffer(bh);
+	}
+	return ret;
 }
 
 static void ext4_xattr_block_csum_set(struct inode *inode,
-				      sector_t block_nr,
-				      struct ext4_xattr_header *hdr)
+				      struct buffer_head *bh)
 {
-	if (!ext4_has_metadata_csum(inode->i_sb))
-		return;
-
-	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
-}
-
-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
-						struct inode *inode,
-						struct buffer_head *bh)
-{
-	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
-	return ext4_handle_dirty_metadata(handle, inode, bh);
+	if (ext4_has_metadata_csum(inode->i_sb))
+		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
+						bh->b_blocknr, BHDR(bh));
 }
 
 static inline const struct xattr_handler *
@@ -218,7 +213,7 @@
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EFSCORRUPTED;
-	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+	if (!ext4_xattr_block_csum_verify(inode, bh))
 		return -EFSBADCRC;
 	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
 				       bh->b_data);
@@ -601,23 +596,22 @@
 			}
 		}
 
+		ext4_xattr_block_csum_set(inode, bh);
 		/*
 		 * Beware of this ugliness: Releasing of xattr block references
 		 * from different inodes can race and so we have to protect
 		 * from a race where someone else frees the block (and releases
 		 * its journal_head) before we are done dirtying the buffer. In
 		 * nojournal mode this race is harmless and we actually cannot
-		 * call ext4_handle_dirty_xattr_block() with locked buffer as
+		 * call ext4_handle_dirty_metadata() with locked buffer as
 		 * that function can call sync_dirty_buffer() so for that case
 		 * we handle the dirtying after unlocking the buffer.
 		 */
 		if (ext4_handle_valid(handle))
-			error = ext4_handle_dirty_xattr_block(handle, inode,
-							      bh);
+			error = ext4_handle_dirty_metadata(handle, inode, bh);
 		unlock_buffer(bh);
 		if (!ext4_handle_valid(handle))
-			error = ext4_handle_dirty_xattr_block(handle, inode,
-							      bh);
+			error = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (IS_SYNC(inode))
 			ext4_handle_sync(handle);
 		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -846,13 +840,14 @@
 				ext4_xattr_cache_insert(ext4_mb_cache,
 					bs->bh);
 			}
+			ext4_xattr_block_csum_set(inode, bs->bh);
 			unlock_buffer(bs->bh);
 			if (error == -EFSCORRUPTED)
 				goto bad_block;
 			if (!error)
-				error = ext4_handle_dirty_xattr_block(handle,
-								      inode,
-								      bs->bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   bs->bh);
 			if (error)
 				goto cleanup;
 			goto inserted;
@@ -950,10 +945,11 @@
 					ce->e_reusable = 0;
 				ea_bdebug(new_bh, "reusing; refcount now=%d",
 					  ref);
+				ext4_xattr_block_csum_set(inode, new_bh);
 				unlock_buffer(new_bh);
-				error = ext4_handle_dirty_xattr_block(handle,
-								      inode,
-								      new_bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   new_bh);
 				if (error)
 					goto cleanup_dquot;
 			}
@@ -1003,11 +999,12 @@
 				goto getblk_failed;
 			}
 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
+			ext4_xattr_block_csum_set(inode, new_bh);
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
 			ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-			error = ext4_handle_dirty_xattr_block(handle,
-							      inode, new_bh);
+			error = ext4_handle_dirty_metadata(handle, inode,
+							   new_bh);
 			if (error)
 				goto cleanup;
 		}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a6a3389..51519c2 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,7 +207,7 @@
 	struct gfs2_sbd *ln_sbd;
 	u64 ln_number;
 	unsigned int ln_type;
-};
+} __packed __aligned(sizeof(int));
 
 #define lm_name_equal(name1, name2) \
         (((name1)->ln_number == (name2)->ln_number) &&	\
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 927da49..7d4b557 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1125,10 +1125,8 @@
 
 	/* Set up a default-sized revoke table for the new mount. */
 	err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
-	if (err) {
-		kfree(journal);
-		return NULL;
-	}
+	if (err)
+		goto err_cleanup;
 
 	spin_lock_init(&journal->j_history_lock);
 
@@ -1145,23 +1143,25 @@
 	journal->j_wbufsize = n;
 	journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *),
 					GFP_KERNEL);
-	if (!journal->j_wbuf) {
-		kfree(journal);
-		return NULL;
-	}
+	if (!journal->j_wbuf)
+		goto err_cleanup;
 
 	bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize);
 	if (!bh) {
 		pr_err("%s: Cannot get buffer for journal superblock\n",
 			__func__);
-		kfree(journal->j_wbuf);
-		kfree(journal);
-		return NULL;
+		goto err_cleanup;
 	}
 	journal->j_sb_buffer = bh;
 	journal->j_superblock = (journal_superblock_t *)bh->b_data;
 
 	return journal;
+
+err_cleanup:
+	kfree(journal->j_wbuf);
+	jbd2_journal_destroy_revoke(journal);
+	kfree(journal);
+	return NULL;
 }
 
 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 91171dc..3cd7305 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -280,6 +280,7 @@
 
 fail1:
 	jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
+	journal->j_revoke_table[0] = NULL;
 fail0:
 	return -ENOMEM;
 }
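
Taken together, the journal.c and revoke.c hunks above converge on one err_cleanup label: kfree(NULL) is a no-op, and nulling j_revoke_table[0] after a partial revoke-table setup is what makes jbd2_journal_destroy_revoke() safe to call from that unwind path. A small sketch of the same goto-cleanup idiom with hypothetical names:

#include <linux/slab.h>

struct example_ctx {
	void *table;
	void *buf;
};

static struct example_ctx *example_init(void)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	ctx->table = kzalloc(64, GFP_KERNEL);
	if (!ctx->table)
		goto err_cleanup;

	ctx->buf = kmalloc(128, GFP_KERNEL);
	if (!ctx->buf)
		goto err_cleanup;

	return ctx;

err_cleanup:
	kfree(ctx->buf);	/* NULL-safe: may never have been allocated */
	kfree(ctx->table);
	kfree(ctx);
	return NULL;
}
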
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 609840d..1536aeb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7426,11 +7426,11 @@
 	struct nfs41_exchange_id_data *cdata =
 					(struct nfs41_exchange_id_data *)data;
 
-	nfs_put_client(cdata->args.client);
 	if (cdata->xprt) {
 		xprt_put(cdata->xprt);
 		rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
 	}
+	nfs_put_client(cdata->args.client);
 	kfree(cdata->res.impl_id);
 	kfree(cdata->res.server_scope);
 	kfree(cdata->res.server_owner);
@@ -7537,10 +7537,8 @@
 	task_setup_data.callback_data = calldata;
 
 	task = rpc_run_task(&task_setup_data);
-	if (IS_ERR(task)) {
-	status = PTR_ERR(task);
-		goto out_impl_id;
-	}
+	if (IS_ERR(task))
+		return PTR_ERR(task);
 
 	if (!xprt) {
 		status = rpc_wait_for_completion_task(task);
@@ -7568,6 +7566,7 @@
 	kfree(calldata->res.server_owner);
 out_calldata:
 	kfree(calldata);
+	nfs_put_client(clp);
 	goto out;
 }
 
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index 2797d2f..afd9771 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -46,7 +46,8 @@
 	spin_unlock(&dentry->d_lock);
 
 	/* check uninitialized obb_dentry and
-	 * whether the base obbpath has been changed or not */
+	 * whether the base obbpath has been changed or not
+	 */
 	if (is_obbpath_invalid(dentry)) {
 		d_drop(dentry);
 		return 0;
@@ -106,12 +107,10 @@
 static void sdcardfs_d_release(struct dentry *dentry)
 {
 	/* release and reset the lower paths */
-	if(has_graft_path(dentry)) {
+	if (has_graft_path(dentry))
 		sdcardfs_put_reset_orig_path(dentry);
-	}
 	sdcardfs_put_reset_lower_path(dentry);
 	free_dentry_private_data(dentry);
-	return;
 }
 
 static int sdcardfs_hash_ci(const struct dentry *dentry,
@@ -128,12 +127,10 @@
 	unsigned long hash;
 
 	name = qstr->name;
-	//len = vfat_striptail_len(qstr);
 	len = qstr->len;
 
 	hash = init_name_hash(dentry);
 	while (len--)
-		//hash = partial_name_hash(nls_tolower(t, *name++), hash);
 		hash = partial_name_hash(tolower(*name++), hash);
 	qstr->hash = end_name_hash(hash);
 
@@ -146,20 +143,8 @@
 static int sdcardfs_cmp_ci(const struct dentry *dentry,
 		unsigned int len, const char *str, const struct qstr *name)
 {
-	/* This function is copy of vfat_cmpi */
-	// FIXME Should we support national language?
-	//struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
-	//unsigned int alen, blen;
+	/* FIXME Should we support national language? */
 
-	/* A filename cannot end in '.' or we treat it like it has none */
-	/*
-	alen = vfat_striptail_len(name);
-	blen = __vfat_striptail_len(len, str);
-	if (alen == blen) {
-		if (nls_strnicmp(t, name->name, str, alen) == 0)
-			return 0;
-	}
-	*/
 	if (name->len == len) {
 		if (str_n_case_eq(name->name, str, len))
 			return 0;
@@ -167,14 +152,16 @@
 	return 1;
 }
 
-static void sdcardfs_canonical_path(const struct path *path, struct path *actual_path) {
+static void sdcardfs_canonical_path(const struct path *path,
+				struct path *actual_path)
+{
 	sdcardfs_get_real_lower(path->dentry, actual_path);
 }
 
 const struct dentry_operations sdcardfs_ci_dops = {
 	.d_revalidate	= sdcardfs_d_revalidate,
 	.d_release	= sdcardfs_d_release,
-	.d_hash 	= sdcardfs_hash_ci,
+	.d_hash	= sdcardfs_hash_ci,
 	.d_compare	= sdcardfs_cmp_ci,
 	.d_canonical_path = sdcardfs_canonical_path,
 };
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index fc5a632..14747a8 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -37,7 +37,8 @@
 
 /* helper function for derived state */
 void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
-                        uid_t uid, bool under_android, struct inode *top)
+						uid_t uid, bool under_android,
+						struct inode *top)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
 
@@ -50,12 +51,17 @@
 	set_top(info, top);
 }
 
-/* While renaming, there is a point where we want the path from dentry, but the name from newdentry */
-void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, const struct qstr *name)
+/* While renaming, there is a point where we want the path from dentry,
+ * but the name from newdentry
+ */
+void get_derived_permission_new(struct dentry *parent, struct dentry *dentry,
+				const struct qstr *name)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(d_inode(dentry));
-	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
+	struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent));
 	appid_t appid;
+	unsigned long user_num;
+	int err;
 	struct qstr q_Android = QSTR_LITERAL("Android");
 	struct qstr q_data = QSTR_LITERAL("data");
 	struct qstr q_obb = QSTR_LITERAL("obb");
@@ -84,7 +90,11 @@
 	case PERM_PRE_ROOT:
 		/* Legacy internal layout places users at top level */
 		info->perm = PERM_ROOT;
-		info->userid = simple_strtoul(name->name, NULL, 10);
+		err = kstrtoul(name->name, 10, &user_num);
+		if (err)
+			info->userid = 0;
+		else
+			info->userid = user_num;
 		set_top(info, &info->vfs_inode);
 		break;
 	case PERM_ROOT:
@@ -118,9 +128,8 @@
 	case PERM_ANDROID_MEDIA:
 		info->perm = PERM_ANDROID_PACKAGE;
 		appid = get_appid(name->name);
-		if (appid != 0 && !is_excluded(name->name, parent_info->userid)) {
+		if (appid != 0 && !is_excluded(name->name, parent_info->userid))
 			info->d_uid = multiuser_get_uid(parent_info->userid, appid);
-		}
 		set_top(info, &info->vfs_inode);
 		break;
 	case PERM_ANDROID_PACKAGE:
@@ -257,7 +266,8 @@
 	return 0;
 }
 
-static int needs_fixup(perm_t perm) {
+static int needs_fixup(perm_t perm)
+{
 	if (perm == PERM_ANDROID_DATA || perm == PERM_ANDROID_OBB
 			|| perm == PERM_ANDROID_MEDIA)
 		return 1;
@@ -295,9 +305,9 @@
 			}
 			spin_unlock(&child->d_lock);
 		}
-	} else 	if (descendant_may_need_fixup(info, limit)) {
+	} else if (descendant_may_need_fixup(info, limit)) {
 		list_for_each_entry(child, &dentry->d_subdirs, d_child) {
-				__fixup_perms_recursive(child, limit, depth + 1);
+			__fixup_perms_recursive(child, limit, depth + 1);
 		}
 	}
 	spin_unlock(&dentry->d_lock);
@@ -313,19 +323,17 @@
 {
 	struct dentry *parent;
 
-	if(!dentry || !d_inode(dentry)) {
-		printk(KERN_ERR "sdcardfs: %s: invalid dentry\n", __func__);
+	if (!dentry || !d_inode(dentry)) {
+		pr_err("sdcardfs: %s: invalid dentry\n", __func__);
 		return;
 	}
 	/* FIXME:
 	 * 1. need to check whether the dentry is updated or not
 	 * 2. remove the root dentry update
 	 */
-	if(IS_ROOT(dentry)) {
-		//setup_default_pre_root_state(d_inode(dentry));
-	} else {
+	if (!IS_ROOT(dentry)) {
 		parent = dget_parent(dentry);
-		if(parent) {
+		if (parent) {
 			get_derived_permission(parent, dentry);
 			dput(parent);
 		}
@@ -337,15 +345,15 @@
 {
 	int ret = 0;
 	struct dentry *parent = dget_parent(dentry);
-	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
+	struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent));
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
 	struct qstr obb = QSTR_LITERAL("obb");
 
-	if(parent_info->perm == PERM_ANDROID &&
+	if (parent_info->perm == PERM_ANDROID &&
 			qstr_case_eq(&dentry->d_name, &obb)) {
 
 		/* /Android/obb is the base obbpath of DERIVED_UNIFIED */
-		if(!(sbi->options.multiuser == false
+		if (!(sbi->options.multiuser == false
 				&& parent_info->userid == 0)) {
 			ret = 1;
 		}
@@ -365,19 +373,19 @@
 
 	/* check the base obbpath has been changed.
 	 * this routine can check an uninitialized obb dentry as well.
-	 * regarding the uninitialized obb, refer to the sdcardfs_mkdir() */
+	 * regarding the uninitialized obb, refer to the sdcardfs_mkdir()
+	 */
 	spin_lock(&di->lock);
-	if(di->orig_path.dentry) {
- 		if(!di->lower_path.dentry) {
+	if (di->orig_path.dentry) {
+		if (!di->lower_path.dentry) {
 			ret = 1;
 		} else {
 			path_get(&di->lower_path);
-			//lower_parent = lock_parent(lower_path->dentry);
 
 			path_buf = kmalloc(PATH_MAX, GFP_ATOMIC);
-			if(!path_buf) {
+			if (!path_buf) {
 				ret = 1;
-				printk(KERN_ERR "sdcardfs: fail to allocate path_buf in %s.\n", __func__);
+				pr_err("sdcardfs: fail to allocate path_buf in %s.\n", __func__);
 			} else {
 				obbpath_s = d_path(&di->lower_path, path_buf, PATH_MAX);
 				if (d_unhashed(di->lower_path.dentry) ||
@@ -387,7 +395,6 @@
 				kfree(path_buf);
 			}
 
-			//unlock_dir(lower_parent);
 			pathcpy(&lower_path, &di->lower_path);
 			need_put = 1;
 		}
@@ -402,13 +409,13 @@
 {
 	int ret = 0;
 	struct dentry *parent = dget_parent(dentry);
-	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
+	struct sdcardfs_inode_info *parent_info = SDCARDFS_I(d_inode(parent));
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
 	struct qstr q_obb = QSTR_LITERAL("obb");
 
 	spin_lock(&SDCARDFS_D(dentry)->lock);
 	if (sbi->options.multiuser) {
-		if(parent_info->perm == PERM_PRE_ROOT &&
+		if (parent_info->perm == PERM_PRE_ROOT &&
 				qstr_case_eq(&dentry->d_name, &q_obb)) {
 			ret = 1;
 		}
@@ -423,7 +430,8 @@
 /* The lower_path will be stored to the dentry's orig_path
  * and the base obbpath will be copyed to the lower_path variable.
  * if an error returned, there's no change in the lower_path
- * returns: -ERRNO if error (0: no error) */
+ * returns: -ERRNO if error (0: no error)
+ */
 int setup_obb_dentry(struct dentry *dentry, struct path *lower_path)
 {
 	int err = 0;
@@ -432,13 +440,14 @@
 
 	/* A local obb dentry must have its own orig_path to support rmdir
 	 * and mkdir of itself. Usually, we expect that the sbi->obbpath
-	 * is avaiable on this stage. */
+	 * is available at this stage.
+	 */
 	sdcardfs_set_orig_path(dentry, lower_path);
 
 	err = kern_path(sbi->obbpath_s,
 			LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &obbpath);
 
-	if(!err) {
+	if (!err) {
 		/* the obbpath base has been found */
 		pathcpy(lower_path, &obbpath);
 	} else {
@@ -446,8 +455,9 @@
 		 * setup the lower_path with its orig_path.
 		 * but, the current implementation just returns an error
 		 * because the sdcard daemon also regards this case as
-		 * a lookup fail. */
-		printk(KERN_INFO "sdcardfs: the sbi->obbpath is not available\n");
+		 * a lookup fail.
+		 */
+		pr_info("sdcardfs: the sbi->obbpath is not available\n");
 	}
 	return err;
 }
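
The get_derived_permission_new() hunk above swaps simple_strtoul() for kstrtoul() so a malformed user directory name is detected and falls back to userid 0 instead of silently producing a bogus value. A small sketch of that parsing pattern in a hypothetical helper (the kstrtoul() signature and 0/-errno return are the real API):

#include <linux/kernel.h>

static unsigned int parse_userid(const char *name)
{
	unsigned long user_num;

	/*
	 * kstrtoul() rejects trailing garbage and overflow, unlike the
	 * deprecated simple_strtoul(), so a bad name maps to user 0.
	 */
	if (kstrtoul(name, 10, &user_num))
		return 0;

	return user_num;
}
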
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 0f2db26..eee4eb5 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -65,7 +65,7 @@
 
 	/* check disk space */
 	if (!check_min_free_space(dentry, count, 0)) {
-		printk(KERN_INFO "No minimum free space.\n");
+		pr_err("No minimum free space.\n");
 		return -ENOSPC;
 	}
 
@@ -160,8 +160,7 @@
 	lower_file = sdcardfs_lower_file(file);
 	if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
 		err = -EINVAL;
-		printk(KERN_ERR "sdcardfs: lower file system does not "
-		       "support writeable mmap\n");
+		pr_err("sdcardfs: lower file system does not support writeable mmap\n");
 		goto out;
 	}
 
@@ -173,14 +172,14 @@
 	if (!SDCARDFS_F(file)->lower_vm_ops) {
 		err = lower_file->f_op->mmap(lower_file, vma);
 		if (err) {
-			printk(KERN_ERR "sdcardfs: lower mmap failed %d\n", err);
+			pr_err("sdcardfs: lower mmap failed %d\n", err);
 			goto out;
 		}
 		saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
 		err = do_munmap(current->mm, vma->vm_start,
 				vma->vm_end - vma->vm_start);
 		if (err) {
-			printk(KERN_ERR "sdcardfs: do_munmap failed %d\n", err);
+			pr_err("sdcardfs: do_munmap failed %d\n", err);
 			goto out;
 		}
 	}
@@ -248,9 +247,8 @@
 
 	if (err)
 		kfree(SDCARDFS_F(file));
-	else {
+	else
 		sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
-	}
 
 out_revert_cred:
 	REVERT_CRED(saved_cred);
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 96a9f87..92afceb 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -240,14 +240,15 @@
 }
 #endif
 
-static int touch(char *abs_path, mode_t mode) {
+static int touch(char *abs_path, mode_t mode)
+{
 	struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
+
 	if (IS_ERR(filp)) {
 		if (PTR_ERR(filp) == -EEXIST) {
 			return 0;
-		}
-		else {
-			printk(KERN_ERR "sdcardfs: failed to open(%s): %ld\n",
+		} else {
+			pr_err("sdcardfs: failed to open(%s): %ld\n",
 						abs_path, PTR_ERR(filp));
 			return PTR_ERR(filp);
 		}
@@ -283,7 +284,7 @@
 
 	/* check disk space */
 	if (!check_min_free_space(dentry, 0, 1)) {
-		printk(KERN_INFO "sdcardfs: No minimum free space.\n");
+		pr_err("sdcardfs: No minimum free space.\n");
 		err = -ENOSPC;
 		goto out_revert;
 	}
@@ -315,19 +316,21 @@
 	}
 
 	/* if it is a local obb dentry, setup it with the base obbpath */
-	if(need_graft_path(dentry)) {
+	if (need_graft_path(dentry)) {
 
 		err = setup_obb_dentry(dentry, &lower_path);
-		if(err) {
+		if (err) {
 			/* if the sbi->obbpath is not available, the lower_path won't be
 			 * changed by setup_obb_dentry() but the lower path is saved to
 			 * its orig_path. this dentry will be revalidated later.
-			 * but now, the lower_path should be NULL */
+			 * but now, the lower_path should be NULL
+			 */
 			sdcardfs_put_reset_lower_path(dentry);
 
 			/* the newly created lower path which saved to its orig_path or
 			 * the lower_path is the base obbpath.
-			 * therefore, an additional path_get is required */
+			 * therefore, an additional path_get is required
+			 */
 			path_get(&lower_path);
 		} else
 			make_nomedia_in_obb = 1;
@@ -357,7 +360,7 @@
 		set_fs_pwd(current->fs, &lower_path);
 		touch_err = touch(".nomedia", 0664);
 		if (touch_err) {
-			printk(KERN_ERR "sdcardfs: failed to create .nomedia in %s: %d\n",
+			pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
 							lower_path.dentry->d_name.name, touch_err);
 			goto out;
 		}
@@ -391,7 +394,8 @@
 	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
-	 * the dentry on the original path should be deleted. */
+	 * the dentry on the original path should be deleted.
+	 */
 	sdcardfs_get_real_lower(dentry, &lower_path);
 
 	lower_dentry = lower_path.dentry;
@@ -642,7 +646,7 @@
 	release_top(SDCARDFS_I(inode));
 	tmp.i_sb = inode->i_sb;
 	if (IS_POSIXACL(inode))
-		printk(KERN_WARNING "%s: This may be undefined behavior... \n", __func__);
+		pr_warn("%s: This may be undefined behavior...\n", __func__);
 	err = generic_permission(&tmp, mask);
 	/* XXX
 	 * Original sdcardfs code calls inode_permission(lower_inode,.. )
@@ -660,6 +664,7 @@
 		 * we check it with AID_MEDIA_RW permission
 		 */
 		struct inode *lower_inode;
+
 		OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
 
 		lower_inode = sdcardfs_lower_inode(inode);
@@ -730,7 +735,8 @@
 	/* prepare our own lower struct iattr (with the lower file) */
 	memcpy(&lower_ia, ia, sizeof(lower_ia));
 	/* Allow touch updating timestamps. A previous permission check ensures
-	 * we have write access. Changes to mode, owner, and group are ignored*/
+	 * we have write access. Changes to mode, owner, and group are ignored
+	 */
 	ia->ia_valid |= ATTR_FORCE;
 	err = setattr_prepare(&tmp_d, ia);
 
@@ -816,10 +822,12 @@
 	return err;
 }
 
-static int sdcardfs_fillattr(struct vfsmount *mnt, struct inode *inode, struct kstat *stat)
+static int sdcardfs_fillattr(struct vfsmount *mnt,
+				struct inode *inode, struct kstat *stat)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
 	struct inode *top = grab_top(info);
+
 	if (!top)
 		return -EINVAL;
 
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 7d26c26..f028bfd 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -36,8 +36,7 @@
 
 void sdcardfs_destroy_dentry_cache(void)
 {
-	if (sdcardfs_dentry_cachep)
-		kmem_cache_destroy(sdcardfs_dentry_cachep);
+	kmem_cache_destroy(sdcardfs_dentry_cachep);
 }
 
 void free_dentry_private_data(struct dentry *dentry)
@@ -73,6 +72,7 @@
 {
 	struct inode *current_lower_inode = sdcardfs_lower_inode(inode);
 	userid_t current_userid = SDCARDFS_I(inode)->userid;
+
 	if (current_lower_inode == ((struct inode_data *)candidate_data)->lower_inode &&
 			current_userid == ((struct inode_data *)candidate_data)->id)
 		return 1; /* found a match */
@@ -102,7 +102,7 @@
 			      * instead.
 			      */
 			     lower_inode->i_ino, /* hashval */
-			     sdcardfs_inode_test,	/* inode comparison function */
+			     sdcardfs_inode_test, /* inode comparison function */
 			     sdcardfs_inode_set, /* inode init function */
 			     &data); /* data passed to test+set fxns */
 	if (!inode) {
@@ -213,8 +213,8 @@
 	bool found;
 };
 
-static int sdcardfs_name_match(struct dir_context *ctx, const char *name, int namelen,
-		loff_t offset, u64 ino, unsigned int d_type)
+static int sdcardfs_name_match(struct dir_context *ctx, const char *name,
+		int namelen, loff_t offset, u64 ino, unsigned int d_type)
 {
 	struct sdcardfs_name_data *buf = container_of(ctx, struct sdcardfs_name_data, ctx);
 	struct qstr candidate = QSTR_INIT(name, namelen);
@@ -303,24 +303,27 @@
 	if (!err) {
 		/* check if the dentry is an obb dentry
 		 * if true, the lower_inode must be replaced with
-		 * the inode of the graft path */
+		 * the inode of the graft path
+		 */
 
-		if(need_graft_path(dentry)) {
+		if (need_graft_path(dentry)) {
 
 			/* setup_obb_dentry()
- 			 * The lower_path will be stored to the dentry's orig_path
+			 * The lower_path will be stored to the dentry's orig_path
+			 * and the base obbpath will be copied to the lower_path variable.
 			 * if an error returned, there's no change in the lower_path
-			 * 		returns: -ERRNO if error (0: no error) */
+			 * returns: -ERRNO if error (0: no error)
+			 */
 			err = setup_obb_dentry(dentry, &lower_path);
 
-			if(err) {
+			if (err) {
 				/* if the sbi->obbpath is not available, we can optionally
 				 * setup the lower_path with its orig_path.
 				 * but, the current implementation just returns an error
 				 * because the sdcard daemon also regards this case as
-				 * a lookup fail. */
-				printk(KERN_INFO "sdcardfs: base obbpath is not available\n");
+				 * a lookup fail.
+				 */
+				pr_info("sdcardfs: base obbpath is not available\n");
 				sdcardfs_put_reset_orig_path(dentry);
 				goto out;
 			}
@@ -374,9 +377,9 @@
 
 /*
  * On success:
- * 	fills dentry object appropriate values and returns NULL.
+ * fills the dentry object with appropriate values and returns NULL.
  * On fail (== error)
- * 	returns error ptr
+ * returns error ptr
  *
  * @dir : Parent inode.
  * @dentry : Target dentry to lookup. we should set each of fields.
@@ -396,7 +399,7 @@
 	if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name)) {
 		ret = ERR_PTR(-EACCES);
 		goto out_err;
-        }
+	}
 
 	/* save current_cred and override it */
 	OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
@@ -412,9 +415,7 @@
 
 	ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path, SDCARDFS_I(dir)->userid);
 	if (IS_ERR(ret))
-	{
 		goto out;
-	}
 	if (ret)
 		dentry = ret;
 	if (d_inode(dentry)) {
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 4e2aded..7344635 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -29,7 +29,7 @@
 	Opt_gid,
 	Opt_debug,
 	Opt_mask,
-	Opt_multiuser, // May need?
+	Opt_multiuser,
 	Opt_userid,
 	Opt_reserved_mb,
 	Opt_err,
@@ -72,6 +72,7 @@
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
+
 		if (!*p)
 			continue;
 
@@ -116,19 +117,17 @@
 			break;
 		/* unknown option */
 		default:
-			if (!silent) {
-				printk( KERN_ERR "Unrecognized mount option \"%s\" "
-						"or missing value", p);
-			}
+			if (!silent)
+				pr_err("Unrecognized mount option \"%s\" or missing value", p);
 			return -EINVAL;
 		}
 	}
 
 	if (*debug) {
-		printk( KERN_INFO "sdcardfs : options - debug:%d\n", *debug);
-		printk( KERN_INFO "sdcardfs : options - uid:%d\n",
+		pr_info("sdcardfs : options - debug:%d\n", *debug);
+		pr_info("sdcardfs : options - uid:%d\n",
 							opts->fs_low_uid);
-		printk( KERN_INFO "sdcardfs : options - gid:%d\n",
+		pr_info("sdcardfs : options - gid:%d\n",
 							opts->fs_low_gid);
 	}
 
@@ -148,6 +147,7 @@
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
+
 		if (!*p)
 			continue;
 
@@ -173,22 +173,20 @@
 		case Opt_fsuid:
 		case Opt_fsgid:
 		case Opt_reserved_mb:
-			printk( KERN_WARNING "Option \"%s\" can't be changed during remount\n", p);
+			pr_warn("Option \"%s\" can't be changed during remount\n", p);
 			break;
 		/* unknown option */
 		default:
-			if (!silent) {
-				printk( KERN_ERR "Unrecognized mount option \"%s\" "
-						"or missing value", p);
-			}
+			if (!silent)
+				pr_err("Unrecognized mount option \"%s\" or missing value", p);
 			return -EINVAL;
 		}
 	}
 
 	if (debug) {
-		printk( KERN_INFO "sdcardfs : options - debug:%d\n", debug);
-		printk( KERN_INFO "sdcardfs : options - gid:%d\n", vfsopts->gid);
-		printk( KERN_INFO "sdcardfs : options - mask:%d\n", vfsopts->mask);
+		pr_info("sdcardfs : options - debug:%d\n", debug);
+		pr_info("sdcardfs : options - gid:%d\n", vfsopts->gid);
+		pr_info("sdcardfs : options - mask:%d\n", vfsopts->mask);
 	}
 
 	return 0;
@@ -223,8 +221,8 @@
 #endif
 
 DEFINE_MUTEX(sdcardfs_super_list_lock);
-LIST_HEAD(sdcardfs_super_list);
 EXPORT_SYMBOL_GPL(sdcardfs_super_list_lock);
+LIST_HEAD(sdcardfs_super_list);
 EXPORT_SYMBOL_GPL(sdcardfs_super_list);
 
 /*
@@ -242,31 +240,30 @@
 	struct sdcardfs_vfsmount_options *mnt_opt = mnt->data;
 	struct inode *inode;
 
-	printk(KERN_INFO "sdcardfs version 2.0\n");
+	pr_info("sdcardfs version 2.0\n");
 
 	if (!dev_name) {
-		printk(KERN_ERR
-		       "sdcardfs: read_super: missing dev_name argument\n");
+		pr_err("sdcardfs: read_super: missing dev_name argument\n");
 		err = -EINVAL;
 		goto out;
 	}
 
-	printk(KERN_INFO "sdcardfs: dev_name -> %s\n", dev_name);
-	printk(KERN_INFO "sdcardfs: options -> %s\n", (char *)raw_data);
-	printk(KERN_INFO "sdcardfs: mnt -> %p\n", mnt);
+	pr_info("sdcardfs: dev_name -> %s\n", dev_name);
+	pr_info("sdcardfs: options -> %s\n", (char *)raw_data);
+	pr_info("sdcardfs: mnt -> %p\n", mnt);
 
 	/* parse lower path */
 	err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
 			&lower_path);
 	if (err) {
-		printk(KERN_ERR	"sdcardfs: error accessing lower directory '%s'\n", dev_name);
+		pr_err("sdcardfs: error accessing lower directory '%s'\n", dev_name);
 		goto out;
 	}
 
 	/* allocate superblock private data */
 	sb->s_fs_info = kzalloc(sizeof(struct sdcardfs_sb_info), GFP_KERNEL);
 	if (!SDCARDFS_SB(sb)) {
-		printk(KERN_CRIT "sdcardfs: read_super: out of memory\n");
+		pr_crit("sdcardfs: read_super: out of memory\n");
 		err = -ENOMEM;
 		goto out_free;
 	}
@@ -275,7 +272,7 @@
 	/* parse options */
 	err = parse_options(sb, raw_data, silent, &debug, mnt_opt, &sb_info->options);
 	if (err) {
-		printk(KERN_ERR	"sdcardfs: invalid options\n");
+		pr_err("sdcardfs: invalid options\n");
 		goto out_freesbi;
 	}
 
@@ -328,14 +325,15 @@
 	/* setup permission policy */
 	sb_info->obbpath_s = kzalloc(PATH_MAX, GFP_KERNEL);
 	mutex_lock(&sdcardfs_super_list_lock);
-	if(sb_info->options.multiuser) {
-		setup_derived_state(d_inode(sb->s_root), PERM_PRE_ROOT, sb_info->options.fs_user_id, AID_ROOT, false, d_inode(sb->s_root));
+	if (sb_info->options.multiuser) {
+		setup_derived_state(d_inode(sb->s_root), PERM_PRE_ROOT,
+					sb_info->options.fs_user_id, AID_ROOT,
+					false, d_inode(sb->s_root));
 		snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name);
-		/*err =  prepare_dir(sb_info->obbpath_s,
-					sb_info->options.fs_low_uid,
-					sb_info->options.fs_low_gid, 00755);*/
 	} else {
-		setup_derived_state(d_inode(sb->s_root), PERM_ROOT, sb_info->options.fs_user_id, AID_ROOT, false, d_inode(sb->s_root));
+		setup_derived_state(d_inode(sb->s_root), PERM_ROOT,
+					sb_info->options.fs_user_id, AID_ROOT,
+					false, d_inode(sb->s_root));
 		snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name);
 	}
 	fixup_tmp_permissions(d_inode(sb->s_root));
@@ -344,7 +342,7 @@
 	mutex_unlock(&sdcardfs_super_list_lock);
 
 	if (!silent)
-		printk(KERN_INFO "sdcardfs: mounted on top of %s type %s\n",
+		pr_info("sdcardfs: mounted on top of %s type %s\n",
 				dev_name, lower_sb->s_type->name);
 	goto out; /* all is well */
 
@@ -368,8 +366,10 @@
 
 /* A feature which supports mount_nodev() with options */
 static struct dentry *mount_nodev_with_options(struct vfsmount *mnt,
-	struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
-        int (*fill_super)(struct vfsmount *, struct super_block *, const char *, void *, int))
+			struct file_system_type *fs_type, int flags,
+			const char *dev_name, void *data,
+			int (*fill_super)(struct vfsmount *, struct super_block *,
+						const char *, void *, int))
 
 {
 	int error;
@@ -401,19 +401,22 @@
 						raw_data, sdcardfs_read_super);
 }
 
-static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type, int flags,
-		    const char *dev_name, void *raw_data)
+static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type,
+		    int flags, const char *dev_name, void *raw_data)
 {
 	WARN(1, "sdcardfs does not support mount. Use mount2.\n");
 	return ERR_PTR(-EINVAL);
 }
 
-void *sdcardfs_alloc_mnt_data(void) {
+void *sdcardfs_alloc_mnt_data(void)
+{
 	return kmalloc(sizeof(struct sdcardfs_vfsmount_options), GFP_KERNEL);
 }
 
-void sdcardfs_kill_sb(struct super_block *sb) {
+void sdcardfs_kill_sb(struct super_block *sb)
+{
 	struct sdcardfs_sb_info *sbi;
+
 	if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
 		sbi = SDCARDFS_SB(sb);
 		mutex_lock(&sdcardfs_super_list_lock);
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 2cc076c..5ea6469 100644
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -48,6 +48,7 @@
 static unsigned int full_name_case_hash(const void *salt, const unsigned char *name, unsigned int len)
 {
 	unsigned long hash = init_name_hash(salt);
+
 	while (len--)
 		hash = partial_name_hash(tolower(*name++), hash);
 	return end_name_hash(hash);
@@ -89,6 +90,7 @@
 appid_t get_appid(const char *key)
 {
 	struct qstr q;
+
 	qstr_init(&q, key);
 	return __get_appid(&q);
 }
@@ -114,6 +116,7 @@
 appid_t get_ext_gid(const char *key)
 {
 	struct qstr q;
+
 	qstr_init(&q, key);
 	return __get_ext_gid(&q);
 }
@@ -144,7 +147,8 @@
 
 /* Kernel has already enforced everything we returned through
  * derive_permissions_locked(), so this is used to lock down access
- * even further, such as enforcing that apps hold sdcard_rw. */
+ * even further, such as enforcing that apps hold sdcard_rw.
+ */
 int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name)
 {
 	struct qstr q_autorun = QSTR_LITERAL("autorun.inf");
@@ -161,26 +165,26 @@
 	}
 
 	/* Root always has access; access for any other UIDs should always
-	 * be controlled through packages.list. */
-	if (from_kuid(&init_user_ns, current_fsuid()) == 0) {
+	 * be controlled through packages.list.
+	 */
+	if (from_kuid(&init_user_ns, current_fsuid()) == 0)
 		return 1;
-	}
 
 	/* No extra permissions to enforce */
 	return 1;
 }
 
 /* This function is used when file opening. The open flags must be
- * checked before calling check_caller_access_to_name() */
-int open_flags_to_access_mode(int open_flags) {
-	if((open_flags & O_ACCMODE) == O_RDONLY) {
+ * checked before calling check_caller_access_to_name()
+ */
+int open_flags_to_access_mode(int open_flags)
+{
+	if ((open_flags & O_ACCMODE) == O_RDONLY)
 		return 0; /* R_OK */
-	} else if ((open_flags & O_ACCMODE) == O_WRONLY) {
+	if ((open_flags & O_ACCMODE) == O_WRONLY)
 		return 1; /* W_OK */
-	} else {
-		/* Probably O_RDRW, but treat as default to be safe */
+	/* Probably O_RDWR, but treat as default to be safe */
 		return 1; /* R_OK | W_OK */
-	}
 }
 
 static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
@@ -372,7 +376,6 @@
 	remove_packagelist_entry_locked(key);
 	fixup_all_perms_name(key);
 	mutex_unlock(&sdcardfs_super_list_lock);
-	return;
 }
 
 static void remove_ext_gid_entry_locked(const struct qstr *key, gid_t group)
@@ -395,7 +398,6 @@
 	mutex_lock(&sdcardfs_super_list_lock);
 	remove_ext_gid_entry_locked(key, group);
 	mutex_unlock(&sdcardfs_super_list_lock);
-	return;
 }
 
 static void remove_userid_all_entry_locked(userid_t userid)
@@ -423,7 +425,6 @@
 	remove_userid_all_entry_locked(userid);
 	fixup_all_perms_userid(userid);
 	mutex_unlock(&sdcardfs_super_list_lock);
-	return;
 }
 
 static void remove_userid_exclude_entry_locked(const struct qstr *key, userid_t userid)
@@ -448,7 +449,6 @@
 	remove_userid_exclude_entry_locked(key, userid);
 	fixup_all_perms_name_userid(key, userid);
 	mutex_unlock(&sdcardfs_super_list_lock);
-	return;
 }
 
 static void packagelist_destroy(void)
@@ -457,6 +457,7 @@
 	struct hlist_node *h_t;
 	HLIST_HEAD(free_list);
 	int i;
+
 	mutex_lock(&sdcardfs_super_list_lock);
 	hash_for_each_rcu(package_to_appid, i, hash_cur, hlist) {
 		hash_del_rcu(&hash_cur->hlist);
@@ -470,7 +471,7 @@
 	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist)
 		free_hashtable_entry(hash_cur);
 	mutex_unlock(&sdcardfs_super_list_lock);
-	printk(KERN_INFO "sdcardfs: destroyed packagelist pkgld\n");
+	pr_info("sdcardfs: destroyed packagelist pkgld\n");
 }
 
 #define SDCARDFS_CONFIGFS_ATTR(_pfx, _name)			\
@@ -586,7 +587,8 @@
 static void package_details_release(struct config_item *item)
 {
 	struct package_details *package_details = to_package_details(item);
-	printk(KERN_INFO "sdcardfs: removing %s\n", package_details->name.name);
+
+	pr_info("sdcardfs: removing %s\n", package_details->name.name);
 	remove_packagelist_entry(&package_details->name);
 	kfree(package_details->name.name);
 	kfree(package_details);
@@ -604,7 +606,7 @@
 };
 
 static struct configfs_item_operations package_details_item_ops = {
-      .release = package_details_release,
+	.release = package_details_release,
 };
 
 static struct config_item_type package_appid_type = {
@@ -638,7 +640,7 @@
 {
 	struct extension_details *extension_details = to_extension_details(item);
 
-	printk(KERN_INFO "sdcardfs: No longer mapping %s files to gid %d\n",
+	pr_info("sdcardfs: No longer mapping %s files to gid %d\n",
 			extension_details->name.name, extension_details->num);
 	remove_ext_gid_entry(&extension_details->name, extension_details->num);
 	kfree(extension_details->name.name);
@@ -660,6 +662,7 @@
 	struct extension_details *extension_details = kzalloc(sizeof(struct extension_details), GFP_KERNEL);
 	const char *tmp;
 	int ret;
+
 	if (!extension_details)
 		return ERR_PTR(-ENOMEM);
 
@@ -714,7 +717,8 @@
 static void extensions_drop_group(struct config_group *group, struct config_item *item)
 {
 	struct extensions_value *value = to_extensions_value(item);
-	printk(KERN_INFO "sdcardfs: No longer mapping any files to gid %d\n", value->num);
+
+	pr_info("sdcardfs: No longer mapping any files to gid %d\n", value->num);
 	kfree(value);
 }
 
@@ -856,7 +860,7 @@
 	mutex_init(&subsys->su_mutex);
 	ret = configfs_register_subsystem(subsys);
 	if (ret) {
-		printk(KERN_ERR "Error %d while registering subsystem %s\n",
+		pr_err("Error %d while registering subsystem %s\n",
 		       ret,
 		       subsys->su_group.cg_item.ci_namebuf);
 	}
@@ -874,18 +878,17 @@
 		kmem_cache_create("packagelist_hashtable_entry",
 					sizeof(struct hashtable_entry), 0, 0, NULL);
 	if (!hashtable_entry_cachep) {
-		printk(KERN_ERR "sdcardfs: failed creating pkgl_hashtable entry slab cache\n");
+		pr_err("sdcardfs: failed creating pkgl_hashtable entry slab cache\n");
 		return -ENOMEM;
 	}
 
 	configfs_sdcardfs_init();
-        return 0;
+	return 0;
 }
 
 void packagelist_exit(void)
 {
 	configfs_sdcardfs_exit();
 	packagelist_destroy();
-	if (hashtable_entry_cachep)
-		kmem_cache_destroy(hashtable_entry_cachep);
+	kmem_cache_destroy(hashtable_entry_cachep);
 }
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 09ec1e4..380982b 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -53,7 +53,7 @@
 #define SDCARDFS_ROOT_INO     1
 
 /* useful for tracking code reachability */
-#define UDBG printk(KERN_DEFAULT "DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
+#define UDBG pr_default("DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
 
 #define SDCARDFS_DIRENT_SIZE 256
 
@@ -87,11 +87,11 @@
 	} while (0)
 
 /* OVERRIDE_CRED() and REVERT_CRED()
- * 	OVERRID_CRED()
- * 		backup original task->cred
- * 		and modifies task->cred->fsuid/fsgid to specified value.
+ *	OVERRIDE_CRED()
+ *		backup original task->cred
+ *		and modifies task->cred->fsuid/fsgid to specified value.
  *	REVERT_CRED()
- * 		restore original task->cred->fsuid/fsgid.
+ *		restore original task->cred->fsuid/fsgid.
  * These two macro should be used in pair, and OVERRIDE_CRED() should be
  * placed at the beginning of a function, right after variable declaration.
  */
@@ -111,36 +111,32 @@
 
 #define REVERT_CRED(saved_cred)	revert_fsids(saved_cred)
 
-#define DEBUG_CRED()		\
-	printk("KAKJAGI: %s:%d fsuid %d fsgid %d\n", 	\
-		__FUNCTION__, __LINE__, 		\
-		(int)current->cred->fsuid, 		\
-		(int)current->cred->fsgid);
-
 /* Android 5.0 support */
 
 /* Permission mode for a specific node. Controls how file permissions
- * are derived for children nodes. */
+ * are derived for children nodes.
+ */
 typedef enum {
-    /* Nothing special; this node should just inherit from its parent. */
-    PERM_INHERIT,
-    /* This node is one level above a normal root; used for legacy layouts
-     * which use the first level to represent user_id. */
-    PERM_PRE_ROOT,
-    /* This node is "/" */
-    PERM_ROOT,
-    /* This node is "/Android" */
-    PERM_ANDROID,
-    /* This node is "/Android/data" */
-    PERM_ANDROID_DATA,
-    /* This node is "/Android/obb" */
-    PERM_ANDROID_OBB,
-    /* This node is "/Android/media" */
-    PERM_ANDROID_MEDIA,
-    /* This node is "/Android/[data|media|obb]/[package]" */
-    PERM_ANDROID_PACKAGE,
-    /* This node is "/Android/[data|media|obb]/[package]/cache" */
-    PERM_ANDROID_PACKAGE_CACHE,
+	/* Nothing special; this node should just inherit from its parent. */
+	PERM_INHERIT,
+	/* This node is one level above a normal root; used for legacy layouts
+	 * which use the first level to represent user_id.
+	 */
+	PERM_PRE_ROOT,
+	/* This node is "/" */
+	PERM_ROOT,
+	/* This node is "/Android" */
+	PERM_ANDROID,
+	/* This node is "/Android/data" */
+	PERM_ANDROID_DATA,
+	/* This node is "/Android/obb" */
+	PERM_ANDROID_OBB,
+	/* This node is "/Android/media" */
+	PERM_ANDROID_MEDIA,
+	/* This node is "/Android/[data|media|obb]/[package]" */
+	PERM_ANDROID_PACKAGE,
+	/* This node is "/Android/[data|media|obb]/[package]/cache" */
+	PERM_ANDROID_PACKAGE_CACHE,
 } perm_t;
 
 struct sdcardfs_sb_info;
@@ -150,7 +146,7 @@
 /* Do not directly use this function. Use OVERRIDE_CRED() instead. */
 const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info);
 /* Do not directly use this function, use REVERT_CRED() instead. */
-void revert_fsids(const struct cred * old_cred);
+void revert_fsids(const struct cred *old_cred);
 
 /* operations vectors defined in specific files */
 extern const struct file_operations sdcardfs_main_fops;
@@ -227,7 +223,8 @@
 	struct super_block *sb;
 	struct super_block *lower_sb;
 	/* derived perm policy : some of options have been added
-	 * to sdcardfs_mount_options (Android 4.4 support) */
+	 * to sdcardfs_mount_options (Android 4.4 support)
+	 */
 	struct sdcardfs_mount_options options;
 	spinlock_t lock;	/* protects obbpath */
 	char *obbpath_s;
@@ -338,7 +335,7 @@
 { \
 	struct path pname; \
 	spin_lock(&SDCARDFS_D(dent)->lock); \
-	if(SDCARDFS_D(dent)->pname.dentry) { \
+	if (SDCARDFS_D(dent)->pname.dentry) { \
 		pathcpy(&pname, &SDCARDFS_D(dent)->pname); \
 		SDCARDFS_D(dent)->pname.dentry = NULL; \
 		SDCARDFS_D(dent)->pname.mnt = NULL; \
@@ -354,17 +351,17 @@
 
 static inline bool sbinfo_has_sdcard_magic(struct sdcardfs_sb_info *sbinfo)
 {
-  return sbinfo && sbinfo->sb && sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC;
+	return sbinfo && sbinfo->sb && sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC;
 }
 
 /* grab a refererence if we aren't linking to ourself */
 static inline void set_top(struct sdcardfs_inode_info *info, struct inode *top)
 {
 	struct inode *old_top = NULL;
+
 	BUG_ON(IS_ERR_OR_NULL(top));
-	if (info->top && info->top != &info->vfs_inode) {
+	if (info->top && info->top != &info->vfs_inode)
 		old_top = info->top;
-	}
 	if (top != &info->vfs_inode)
 		igrab(top);
 	info->top = top;
@@ -374,11 +371,11 @@
 static inline struct inode *grab_top(struct sdcardfs_inode_info *info)
 {
 	struct inode *top = info->top;
-	if (top) {
+
+	if (top)
 		return igrab(top);
-	} else {
+	else
 		return NULL;
-	}
 }
 
 static inline void release_top(struct sdcardfs_inode_info *info)
@@ -386,21 +383,24 @@
 	iput(info->top);
 }
 
-static inline int get_gid(struct vfsmount *mnt, struct sdcardfs_inode_info *info) {
+static inline int get_gid(struct vfsmount *mnt, struct sdcardfs_inode_info *info)
+{
 	struct sdcardfs_vfsmount_options *opts = mnt->data;
 
-	if (opts->gid == AID_SDCARD_RW) {
+	if (opts->gid == AID_SDCARD_RW)
 		/* As an optimization, certain trusted system components only run
 		 * as owner but operate across all users. Since we're now handing
 		 * out the sdcard_rw GID only to trusted apps, we're okay relaxing
 		 * the user boundary enforcement for the default view. The UIDs
-		 * assigned to app directories are still multiuser aware. */
+		 * assigned to app directories are still multiuser aware.
+		 */
 		return AID_SDCARD_RW;
-	} else {
+	else
 		return multiuser_get_uid(info->userid, opts->gid);
-	}
 }
-static inline int get_mode(struct vfsmount *mnt, struct sdcardfs_inode_info *info) {
+
+static inline int get_mode(struct vfsmount *mnt, struct sdcardfs_inode_info *info)
+{
 	int owner_mode;
 	int filtered_mode;
 	struct sdcardfs_vfsmount_options *opts = mnt->data;
@@ -409,17 +409,18 @@
 
 	if (info->perm == PERM_PRE_ROOT) {
 		/* Top of multi-user view should always be visible to ensure
-		* secondary users can traverse inside. */
+		* secondary users can traverse inside.
+		*/
 		visible_mode = 0711;
 	} else if (info->under_android) {
 		/* Block "other" access to Android directories, since only apps
 		* belonging to a specific user should be in there; we still
-		* leave +x open for the default view. */
-		if (opts->gid == AID_SDCARD_RW) {
+		* leave +x open for the default view.
+		*/
+		if (opts->gid == AID_SDCARD_RW)
 			visible_mode = visible_mode & ~0006;
-		} else {
+		else
 			visible_mode = visible_mode & ~0007;
-		}
 	}
 	owner_mode = info->lower_inode->i_mode & 0700;
 	filtered_mode = visible_mode & (owner_mode | (owner_mode >> 3) | (owner_mode >> 6));
@@ -444,7 +445,7 @@
 	/* in case of a local obb dentry
 	 * the orig_path should be returned
 	 */
-	if(has_graft_path(dent))
+	if (has_graft_path(dent))
 		sdcardfs_get_orig_path(dent, real_lower);
 	else
 		sdcardfs_get_lower_path(dent, real_lower);
@@ -453,7 +454,7 @@
 static inline void sdcardfs_put_real_lower(const struct dentry *dent,
 						struct path *real_lower)
 {
-	if(has_graft_path(dent))
+	if (has_graft_path(dent))
 		sdcardfs_put_orig_path(dent, real_lower);
 	else
 		sdcardfs_put_lower_path(dent, real_lower);
@@ -497,6 +498,7 @@
 static inline struct dentry *lock_parent(struct dentry *dentry)
 {
 	struct dentry *dir = dget_parent(dentry);
+
 	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
 	return dir;
 }
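
For reference, the mode math in get_mode() above replicates the owner bits into the group and other positions and then clips them with the visible mode. A worked example outside the kernel, with illustrative values only:

#include <stdio.h>

int main(void)
{
	int owner_mode = 0700;			/* from the lower inode */
	int visible_mode = 0775 & ~0006;	/* Android dir, gid == AID_SDCARD_RW */
	int filtered_mode;

	filtered_mode = visible_mode &
		(owner_mode | (owner_mode >> 3) | (owner_mode >> 6));

	printf("filtered mode = %o\n", filtered_mode);	/* prints 771 */
	return 0;
}
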
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index edda32b..a3393e9 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -36,7 +36,7 @@
 	if (!spd)
 		return;
 
-	if(spd->obbpath_s) {
+	if (spd->obbpath_s) {
 		kfree(spd->obbpath_s);
 		path_put(&spd->obbpath);
 	}
@@ -64,7 +64,7 @@
 	if (sbi->options.reserved_mb) {
 		/* Invalid statfs informations. */
 		if (buf->f_bsize == 0) {
-			printk(KERN_ERR "Returned block size is zero.\n");
+			pr_err("Returned block size is zero.\n");
 			return -EINVAL;
 		}
 
@@ -100,8 +100,7 @@
 	 * SILENT, but anything else left over is an error.
 	 */
 	if ((*flags & ~(MS_RDONLY | MS_MANDLOCK | MS_SILENT)) != 0) {
-		printk(KERN_ERR
-		       "sdcardfs: remount flags 0x%x unsupported\n", *flags);
+		pr_err("sdcardfs: remount flags 0x%x unsupported\n", *flags);
 		err = -EINVAL;
 	}
 
@@ -125,29 +124,33 @@
 	 * SILENT, but anything else left over is an error.
 	 */
 	if ((*flags & ~(MS_RDONLY | MS_MANDLOCK | MS_SILENT | MS_REMOUNT)) != 0) {
-		printk(KERN_ERR
-		       "sdcardfs: remount flags 0x%x unsupported\n", *flags);
+		pr_err("sdcardfs: remount flags 0x%x unsupported\n", *flags);
 		err = -EINVAL;
 	}
-	printk(KERN_INFO "Remount options were %s for vfsmnt %p.\n", options, mnt);
+	pr_info("Remount options were %s for vfsmnt %p.\n", options, mnt);
 	err = parse_options_remount(sb, options, *flags & ~MS_SILENT, mnt->data);
 
 
 	return err;
 }
 
-static void* sdcardfs_clone_mnt_data(void *data) {
-	struct sdcardfs_vfsmount_options* opt = kmalloc(sizeof(struct sdcardfs_vfsmount_options), GFP_KERNEL);
-	struct sdcardfs_vfsmount_options* old = data;
-	if(!opt) return NULL;
+static void *sdcardfs_clone_mnt_data(void *data)
+{
+	struct sdcardfs_vfsmount_options *opt = kmalloc(sizeof(struct sdcardfs_vfsmount_options), GFP_KERNEL);
+	struct sdcardfs_vfsmount_options *old = data;
+
+	if (!opt)
+		return NULL;
 	opt->gid = old->gid;
 	opt->mask = old->mask;
 	return opt;
 }
 
-static void sdcardfs_copy_mnt_data(void *data, void *newdata) {
-	struct sdcardfs_vfsmount_options* old = data;
-	struct sdcardfs_vfsmount_options* new = newdata;
+static void sdcardfs_copy_mnt_data(void *data, void *newdata)
+{
+	struct sdcardfs_vfsmount_options *old = data;
+	struct sdcardfs_vfsmount_options *new = newdata;
+
 	old->gid = new->gid;
 	old->mask = new->mask;
 }
@@ -218,8 +221,7 @@
 /* sdcardfs inode cache destructor */
 void sdcardfs_destroy_inode_cache(void)
 {
-	if (sdcardfs_inode_cachep)
-		kmem_cache_destroy(sdcardfs_inode_cachep);
+	kmem_cache_destroy(sdcardfs_inode_cachep);
 }
 
 /*
@@ -235,7 +237,8 @@
 		lower_sb->s_op->umount_begin(lower_sb);
 }
 
-static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m, struct dentry *root)
+static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m,
+			struct dentry *root)
 {
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(root->d_sb);
 	struct sdcardfs_mount_options *opts = &sbi->options;
@@ -248,7 +251,7 @@
 	if (vfsopts->gid != 0)
 		seq_printf(m, ",gid=%u", vfsopts->gid);
 	if (opts->multiuser)
-		seq_printf(m, ",multiuser");
+		seq_puts(m, ",multiuser");
 	if (vfsopts->mask)
 		seq_printf(m, ",mask=%u", vfsopts->mask);
 	if (opts->fs_user_id)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 6726440..e9fb2e8 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -361,6 +361,7 @@
 /* Event queued up for userspace to read */
 struct drm_pending_event {
 	struct completion *completion;
+	void (*completion_release)(struct completion *completion);
 	struct drm_event *event;
 	struct fence *fence;
 	struct list_head link;
diff --git a/include/dt-bindings/clock/qcom,cpucc-sdm845.h b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
index c1ff2a0..db3c940 100644
--- a/include/dt-bindings/clock/qcom,cpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,18 +14,18 @@
 #ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 #define _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 
-#define L3_CLUSTER0_VOTE_CLK					0
-#define L3_CLUSTER1_VOTE_CLK					1
-#define L3_CLK							2
-#define CPU0_PWRCL_CLK						3
-#define CPU1_PWRCL_CLK						4
-#define CPU2_PWRCL_CLK						5
-#define CPU3_PWRCL_CLK						6
-#define PWRCL_CLK						7
-#define CPU4_PERFCL_CLK						8
-#define CPU5_PERFCL_CLK						9
-#define CPU6_PERFCL_CLK						10
-#define CPU7_PERFCL_CLK						11
-#define PERFCL_CLK						12
+#define L3_CLK							0
+#define PWRCL_CLK						1
+#define PERFCL_CLK						2
+#define L3_CLUSTER0_VOTE_CLK					3
+#define L3_CLUSTER1_VOTE_CLK					4
+#define CPU0_PWRCL_CLK						5
+#define CPU1_PWRCL_CLK						6
+#define CPU2_PWRCL_CLK						7
+#define CPU3_PWRCL_CLK						8
+#define CPU4_PERFCL_CLK						9
+#define CPU5_PERFCL_CLK						10
+#define CPU6_PERFCL_CLK						11
+#define CPU7_PERFCL_CLK						12
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
index 41eb823..13de1e1 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,42 +14,42 @@
 #ifndef _DT_BINDINGS_CLK_MSM_GPU_CC_SDM845_H
 #define _DT_BINDINGS_CLK_MSM_GPU_CC_SDM845_H
 
+/* GPUCC clock registers */
 #define GPU_CC_ACD_AHB_CLK					0
 #define GPU_CC_ACD_CXO_CLK					1
-#define GPU_CC_AHB_CLK						2
+#define GPU_CC_AHB_CLK					2
 #define GPU_CC_CRC_AHB_CLK					3
 #define GPU_CC_CX_APB_CLK					4
-#define GPU_CC_CX_GFX3D_CLK					5
-#define GPU_CC_CX_GFX3D_SLV_CLK					6
-#define GPU_CC_CX_GMU_CLK					7
-#define GPU_CC_CX_QDSS_AT_CLK					8
-#define GPU_CC_CX_QDSS_TRIG_CLK					9
-#define GPU_CC_CX_QDSS_TSCTR_CLK				10
-#define GPU_CC_CX_SNOC_DVM_CLK					11
-#define GPU_CC_CXO_AON_CLK					12
-#define GPU_CC_CXO_CLK						13
-#define GPU_CC_DEBUG_CLK					14
-#define GPU_CC_GX_CXO_CLK					15
-#define GPU_CC_GX_GMU_CLK					16
-#define GPU_CC_GX_QDSS_TSCTR_CLK				17
-#define GPU_CC_GX_VSENSE_CLK					18
-#define GPU_CC_PLL0						19
-#define GPU_CC_PLL0_OUT_EVEN					20
-#define GPU_CC_PLL0_OUT_MAIN					21
-#define GPU_CC_PLL0_OUT_ODD					22
-#define GPU_CC_PLL0_OUT_TEST					23
-#define GPU_CC_PLL1						24
-#define GPU_CC_PLL1_OUT_EVEN					25
-#define GPU_CC_PLL1_OUT_MAIN					26
-#define GPU_CC_PLL1_OUT_ODD					27
-#define GPU_CC_PLL1_OUT_TEST					28
-#define GPU_CC_PLL_TEST_CLK					29
-#define GPU_CC_RBCPR_AHB_CLK					30
-#define GPU_CC_RBCPR_CLK					31
-#define GPU_CC_RBCPR_CLK_SRC					32
-#define GPU_CC_SLEEP_CLK					33
-#define GPU_CC_SPDM_GX_GFX3D_DIV_CLK				34
+#define GPU_CC_CX_GMU_CLK					5
+#define GPU_CC_CX_QDSS_AT_CLK					6
+#define GPU_CC_CX_QDSS_TRIG_CLK					7
+#define GPU_CC_CX_QDSS_TSCTR_CLK					8
+#define GPU_CC_CX_SNOC_DVM_CLK						9
+#define GPU_CC_CXO_AON_CLK					10
+#define GPU_CC_CXO_CLK					11
+#define GPU_CC_DEBUG_CLK					12
+#define GPU_CC_GX_CXO_CLK					13
+#define GPU_CC_GX_GMU_CLK					14
+#define GPU_CC_GX_QDSS_TSCTR_CLK					15
+#define GPU_CC_GX_VSENSE_CLK					16
+#define GPU_CC_PLL0_OUT_MAIN					17
+#define GPU_CC_PLL0_OUT_ODD						18
+#define GPU_CC_PLL0_OUT_TEST					19
+#define GPU_CC_PLL1						20
+#define GPU_CC_PLL1_OUT_EVEN					21
+#define GPU_CC_PLL1_OUT_MAIN					22
+#define GPU_CC_PLL1_OUT_ODD					23
+#define GPU_CC_PLL1_OUT_TEST					24
+#define GPU_CC_PLL_TEST_CLK					25
+#define GPU_CC_RBCPR_AHB_CLK					26
+#define GPU_CC_RBCPR_CLK					27
+#define GPU_CC_RBCPR_CLK_SRC					28
+#define GPU_CC_SLEEP_CLK					29
+#define GPU_CC_GMU_CLK_SRC					30
+#define GPU_CC_CX_GFX3D_CLK					31
+#define GPU_CC_CX_GFX3D_SLV_CLK					32
 
+/* GPUCC reset clock registers */
 #define GPUCC_GPU_CC_ACD_BCR					0
 #define GPUCC_GPU_CC_CX_BCR					1
 #define GPUCC_GPU_CC_GFX3D_AON_BCR				2
@@ -59,4 +59,9 @@
 #define GPUCC_GPU_CC_SPDM_BCR					6
 #define GPUCC_GPU_CC_XO_BCR					7
 
+/* GFX3D clock registers */
+#define GPU_CC_PLL0						0
+#define GPU_CC_PLL0_OUT_EVEN					1
+#define GPU_CC_GX_GFX3D_CLK_SRC					2
+#define GPU_CC_GX_GFX3D_CLK						3
 #endif
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 86ac8d4..900f268 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -87,6 +87,9 @@
 #define	MSM_BUS_BCM_ACV 7037
 #define	MSM_BUS_BCM_ALC 7038
 
+#define	MSM_BUS_RSC_APPS 8000
+#define	MSM_BUS_RSC_DISP 8001
+
 #define MSM_BUS_BCM_MC0_DISPLAY 27000
 #define MSM_BUS_BCM_SH0_DISPLAY 27001
 #define MSM_BUS_BCM_MM0_DISPLAY 27002
diff --git a/include/linux/batterydata-lib.h b/include/linux/batterydata-lib.h
new file mode 100644
index 0000000..39517f8
--- /dev/null
+++ b/include/linux/batterydata-lib.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BMS_BATTERYDATA_H
+#define __BMS_BATTERYDATA_H
+
+#include <linux/errno.h>
+
+#define FCC_CC_COLS		5
+#define FCC_TEMP_COLS		8
+
+#define PC_CC_ROWS             31
+#define PC_CC_COLS             13
+
+#define PC_TEMP_ROWS		31
+#define PC_TEMP_COLS		8
+
+#define ACC_IBAT_ROWS		4
+#define ACC_TEMP_COLS		3
+
+#define MAX_SINGLE_LUT_COLS	20
+
+#define MAX_BATT_ID_NUM		4
+#define DEGC_SCALE		10
+
+struct single_row_lut {
+	int x[MAX_SINGLE_LUT_COLS];
+	int y[MAX_SINGLE_LUT_COLS];
+	int cols;
+};
+
+/**
+ * struct sf_lut -
+ * @rows:	number of percent charge entries should be <= PC_CC_ROWS
+ * @cols:	number of charge cycle entries should be <= PC_CC_COLS
+ * @row_entries:	the charge cycles/temperature at which sf data
+ *			is available in the table.
+ *		The charge cycles must be in increasing order from 0 to rows.
+ * @percent:	the percent charge at which sf data is available in the table
+ *		The percent charge must be in decreasing order from 0 to cols.
+ * @sf:		the scaling factor data
+ */
+struct sf_lut {
+	int rows;
+	int cols;
+	int row_entries[PC_CC_COLS];
+	int percent[PC_CC_ROWS];
+	int sf[PC_CC_ROWS][PC_CC_COLS];
+};
+
+/**
+ * struct pc_temp_ocv_lut -
+ * @rows:	number of percent charge entries should be <= PC_TEMP_ROWS
+ * @cols:	number of temperature entries should be <= PC_TEMP_COLS
+ * @temp:	the temperatures at which ocv data is available in the table
+ *		The temperatures must be in increasing order from 0 to rows.
+ * @percent:	the percent charge at which ocv data is available in the table
+ *		The percent charge must be in decreasing order from 0 to cols.
+ * @ocv:	the open circuit voltage
+ */
+struct pc_temp_ocv_lut {
+	int rows;
+	int cols;
+	int temp[PC_TEMP_COLS];
+	int percent[PC_TEMP_ROWS];
+	int ocv[PC_TEMP_ROWS][PC_TEMP_COLS];
+};
+
+struct ibat_temp_acc_lut {
+	int rows;
+	int cols;
+	int temp[ACC_TEMP_COLS];
+	int ibat[ACC_IBAT_ROWS];
+	int acc[ACC_IBAT_ROWS][ACC_TEMP_COLS];
+};
+
+struct batt_ids {
+	int kohm[MAX_BATT_ID_NUM];
+	int num;
+};
+
+enum battery_type {
+	BATT_UNKNOWN = 0,
+	BATT_PALLADIUM,
+	BATT_DESAY,
+	BATT_OEM,
+	BATT_QRD_4V35_2000MAH,
+	BATT_QRD_4V2_1300MAH,
+};
+
+/**
+ * struct bms_battery_data -
+ * @fcc:		full charge capacity (mAmpHour)
+ * @fcc_temp_lut:	table to get fcc at a given temp
+ * @pc_temp_ocv_lut:	table to get percent charge given batt temp and cycles
+ * @pc_sf_lut:		table to get percent charge scaling factor given cycles
+ *			and percent charge
+ * @rbatt_sf_lut:	table to get battery resistance scaling factor given
+ *			temperature and percent charge
+ * @default_rbatt_mohm:	the default value of battery resistance to use when
+ *			readings from bms are not available.
+ * @delta_rbatt_mohm:	the resistance to be added towards lower soc to
+ *			compensate for battery capacitance.
+ * @rbatt_capacitive_mohm: the resistance to be added to compensate for
+ *				battery capacitance
+ * @flat_ocv_threshold_uv: the voltage where the battery's discharge curve
+ *				starts flattening out.
+ * @max_voltage_uv:	max voltage of the battery
+ * @cutoff_uv:		cutoff voltage of the battery
+ * @iterm_ua:		termination current of the battery when charging
+ *			to 100%
+ * @batt_id_kohm:	the best matched battery id resistor value
+ * @fastchg_current_ma: maximum fast charge current
+ * @fg_cc_cv_threshold_mv: CC to CV threshold voltage
+ */
+
+struct bms_battery_data {
+	unsigned int		fcc;
+	struct single_row_lut	*fcc_temp_lut;
+	struct single_row_lut	*fcc_sf_lut;
+	struct pc_temp_ocv_lut	*pc_temp_ocv_lut;
+	struct ibat_temp_acc_lut *ibat_acc_lut;
+	struct sf_lut		*pc_sf_lut;
+	struct sf_lut		*rbatt_sf_lut;
+	int			default_rbatt_mohm;
+	int			delta_rbatt_mohm;
+	int			rbatt_capacitive_mohm;
+	int			flat_ocv_threshold_uv;
+	int			max_voltage_uv;
+	int			cutoff_uv;
+	int			iterm_ua;
+	int			batt_id_kohm;
+	int			fastchg_current_ma;
+	int			fg_cc_cv_threshold_mv;
+	const char		*battery_type;
+};
+
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
+
+#if defined(CONFIG_PM8921_BMS) || \
+	defined(CONFIG_PM8921_BMS_MODULE) || \
+	defined(CONFIG_QPNP_BMS) || \
+	defined(CONFIG_QPNP_VM_BMS)
+extern struct bms_battery_data  palladium_1500_data;
+extern struct bms_battery_data  desay_5200_data;
+extern struct bms_battery_data  oem_batt_data;
+extern struct bms_battery_data QRD_4v35_2000mAh_data;
+extern struct bms_battery_data  qrd_4v2_1300mah_data;
+
+int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp);
+int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc);
+int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut,
+				int cycles);
+int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+				int batt_temp_degc, int ocv);
+int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+				int batt_temp_degc, int pc);
+int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+					int batt_temp, int pc);
+int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+					int batt_temp, int ibat);
+int linear_interpolate(int y0, int x0, int y1, int x1, int x);
+#else
+static inline int interpolate_fcc(struct single_row_lut *fcc_temp_lut,
+			int batt_temp)
+{
+	return -EINVAL;
+}
+static inline int interpolate_scalingfactor(struct sf_lut *sf_lut,
+			int row_entry, int pc)
+{
+	return -EINVAL;
+}
+static inline int interpolate_scalingfactor_fcc(
+			struct single_row_lut *fcc_sf_lut, int cycles)
+{
+	return -EINVAL;
+}
+static inline int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+			int batt_temp_degc, int ocv)
+{
+	return -EINVAL;
+}
+static inline int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+			int batt_temp_degc, int pc)
+{
+	return -EINVAL;
+}
+static inline int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+					int batt_temp, int pc)
+{
+	return -EINVAL;
+}
+static inline int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+{
+	return -EINVAL;
+}
+static inline int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+						int batt_temp, int ibat)
+{
+	return -EINVAL;
+}
+#endif
+
+#endif
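
The lookup helpers declared above all reduce to linear interpolation between two table points, with is_between() guarding the range check. A minimal sketch of that pattern follows; the wrapper name and its error handling are illustrative only and not part of this patch:

	/*
	 * Illustrative: interpolate OCV between two known (pc, ocv) points
	 * taken from one temperature column of a pc_temp_ocv_lut. The real
	 * table walk is done by interpolate_ocv()/interpolate_pc().
	 */
	static int example_ocv_at_pc(int pc_lo, int ocv_lo, int pc_hi,
					int ocv_hi, int pc)
	{
		if (!is_between(pc_lo, pc_hi, pc))
			return -EINVAL;
		/* y = y0 + (y1 - y0) * (x - x0) / (x1 - x0) */
		return linear_interpolate(ocv_lo, pc_lo, ocv_hi, pc_hi, pc);
	}
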
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index a765333..edc5d04 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -556,7 +556,7 @@
  * struct ccp_cmd - CPP operation request
  * @entry: list element (ccp driver use only)
  * @work: work element used for callbacks (ccp driver use only)
- * @ccp: CCP device to be run on (ccp driver use only)
+ * @ccp: CCP device to be run on
  * @ret: operation return code (ccp driver use only)
  * @flags: cmd processing flags
  * @engine: CCP operation to perform
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c156f50..4fa2623 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -30,6 +30,11 @@
 
 typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
 			    unsigned long voltage, u32 *power);
+typedef int (*plat_mitig_t)(int cpu, u32 clip_freq);
+
+struct cpu_cooling_ops {
+	plat_mitig_t ceil_limit, floor_limit;
+};
 
 #ifdef CONFIG_CPU_THERMAL
 /**
@@ -43,6 +48,10 @@
 cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
 			       u32 capacitance, get_static_t plat_static_func);
 
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+					struct cpu_cooling_ops *ops);
+
 /**
  * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
  * @np: a valid struct device_node to the cooling device device tree node.
@@ -112,6 +121,13 @@
 	return NULL;
 }
 
+static inline struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+					struct cpu_cooling_ops *ops)
+{
+	return NULL;
+}
+
 static inline
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
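
For illustration, a platform thermal driver would fill cpu_cooling_ops with its own mitigation callbacks and hand them to cpufreq_platform_cooling_register(); the callback body and names below are hypothetical placeholders:

	static int example_ceil_limit(int cpu, u32 clip_freq)
	{
		/* hypothetical: ask the platform to cap this CPU at clip_freq */
		return 0;
	}

	static struct cpu_cooling_ops example_ops = {
		.ceil_limit = example_ceil_limit,
		/* .floor_limit may stay NULL if floor mitigation is unsupported */
	};

	static struct thermal_cooling_device *example_register(void)
	{
		return cpufreq_platform_cooling_register(cpu_possible_mask,
							 &example_ops);
	}
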
diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h
index 7adf6cc..374eb79 100644
--- a/include/linux/devfreq_cooling.h
+++ b/include/linux/devfreq_cooling.h
@@ -20,8 +20,6 @@
 #include <linux/devfreq.h>
 #include <linux/thermal.h>
 
-#ifdef CONFIG_DEVFREQ_THERMAL
-
 /**
  * struct devfreq_cooling_power - Devfreq cooling power ops
  * @get_static_power:	Take voltage, in mV, and return the static power
@@ -43,6 +41,8 @@
 	unsigned long dyn_power_coeff;
 };
 
+#ifdef CONFIG_DEVFREQ_THERMAL
+
 struct thermal_cooling_device *
 of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
 				  struct devfreq_cooling_power *dfc_power);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index ff8b11b..f6dfc29 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -79,7 +79,6 @@
 	u8 ci_filename_mode;
 	u8 ci_flags;
 	struct crypto_skcipher *ci_ctfm;
-	struct key *ci_keyring_key;
 	u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
 };
 
@@ -256,7 +255,6 @@
 extern int fscrypt_inherit_context(struct inode *, struct inode *,
 					void *, bool);
 /* keyinfo.c */
-extern int get_crypt_info(struct inode *);
 extern int fscrypt_get_encryption_info(struct inode *);
 extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
 
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index 23ca415..fa79319 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -62,7 +62,7 @@
 				  const char *name,
 				  struct config_item_type *type)
 {
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
 	config_group_init_type_name(&d->group, name, type);
 #endif
 }
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index a83ac84..16d3d26 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1015,6 +1015,12 @@
 	bool smmu_enabled;
 };
 
+enum ipa_upstream_type {
+	IPA_UPSTEAM_MODEM = 1,
+	IPA_UPSTEAM_WLAN,
+	IPA_UPSTEAM_MAX
+};
+
 /**
  * struct  ipa_wdi_out_params - information provided to WDI client
  * @uc_door_bell_pa: physical address of IPA uc doorbell
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7aebe23..3b94400 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -37,6 +37,7 @@
 	__s32		accept_ra_rtr_pref;
 	__s32		rtr_probe_interval;
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	__s32		accept_ra_rt_info_min_plen;
 	__s32		accept_ra_rt_info_max_plen;
 #endif
 #endif
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d..f38fae2 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -16,12 +16,6 @@
 #include <linux/bitops.h>
 
 /*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
  * non-constant log of base 2 calculators
  * - the arch may override these in asm/bitops.h if they can be implemented
  *   more efficiently than using fls() and fls64()
@@ -85,7 +79,7 @@
 #define ilog2(n)				\
 (						\
 	__builtin_constant_p(n) ? (		\
-		(n) < 1 ? ____ilog2_NaN() :	\
+		(n) < 2 ? 0 :			\
 		(n) & (1ULL << 63) ? 63 :	\
 		(n) & (1ULL << 62) ? 62 :	\
 		(n) & (1ULL << 61) ? 61 :	\
@@ -148,10 +142,7 @@
 		(n) & (1ULL <<  4) ?  4 :	\
 		(n) & (1ULL <<  3) ?  3 :	\
 		(n) & (1ULL <<  2) ?  2 :	\
-		(n) & (1ULL <<  1) ?  1 :	\
-		(n) & (1ULL <<  0) ?  0 :	\
-		____ilog2_NaN()			\
-				   ) :		\
+		1 ) :				\
 	(sizeof(n) <= 4) ?			\
 	__ilog2_u32(n) :			\
 	__ilog2_u64(n)				\
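
With ____ilog2_NaN() gone, a constant argument below 2 now folds to 0 at compile time instead of relying on an undefined extern to break the build; non-constant arguments still go through __ilog2_u32()/__ilog2_u64(). A small compile-time check, assuming <linux/bug.h> for BUILD_BUG_ON(), illustrates the new behaviour:

	static inline void ilog2_examples(void)
	{
		BUILD_BUG_ON(ilog2(0) != 0);	/* previously ____ilog2_NaN() */
		BUILD_BUG_ON(ilog2(1) != 0);	/* previously ____ilog2_NaN() */
		BUILD_BUG_ON(ilog2(2) != 1);
		BUILD_BUG_ON(ilog2(7) != 2);
		BUILD_BUG_ON(ilog2(64) != 6);
	}
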
diff --git a/include/linux/mailbox/qmp.h b/include/linux/mailbox/qmp.h
new file mode 100644
index 0000000..df3565b
--- /dev/null
+++ b/include/linux/mailbox/qmp.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMP_H_
+#define _QMP_H_
+
+#include <linux/types.h>
+
+/**
+ * struct qmp_pkt - Packet structure to be used for TX and RX with QMP
+ * @size:	size of data
+ * @data:	Buffer holding data of this packet
+ */
+struct qmp_pkt {
+	u32 size;
+	void *data;
+};
+
+#endif /* _QMP_H_ */
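
A mailbox client built on this header wraps its payload in a struct qmp_pkt and sends it through the standard mailbox client API; the sketch below assumes a channel already obtained via mbox_request_channel(), and the payload string is made up:

	#include <linux/mailbox_client.h>
	#include <linux/mailbox/qmp.h>

	static int example_qmp_send(struct mbox_chan *chan)
	{
		static char msg[] = "{class: example}";	/* made-up payload */
		struct qmp_pkt pkt = {
			.size = sizeof(msg),
			.data = msg,
		};

		/* queue the packet on the already-requested QMP channel */
		return mbox_send_message(chan, &pkt);
	}
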
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 510a73a..d265f60 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -273,6 +273,7 @@
 						/* for byte mode */
 #define MMC_QUIRK_NONSTD_SDIO	(1<<2)		/* non-standard SDIO card attached */
 						/* (missing CIA registers) */
+#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3)	/* clock gating the sdio bus will make card fail */
 #define MMC_QUIRK_NONSTD_FUNC_IF (1<<4)		/* SDIO card has nonstd function interfaces */
 #define MMC_QUIRK_DISABLE_CD	(1<<5)		/* disconnect CD/DAT[3] resistor */
 #define MMC_QUIRK_INAND_CMD38	(1<<6)		/* iNAND devices have broken CMD38 */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index fac3b5c..b7b92bf 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -316,6 +316,18 @@
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
+#ifdef CONFIG_MMC_CLKGATE
+	int			clk_requests;	/* internal reference counter */
+	unsigned int		clk_delay;	/* number of MCI clk hold cycles */
+	bool			clk_gated;	/* clock gated */
+	struct delayed_work	clk_gate_work; /* delayed clock gate */
+	unsigned int		clk_old;	/* old clock value cache */
+	spinlock_t		clk_lock;	/* lock for clk fields */
+	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
+	struct device_attribute clkgate_delay_attr;
+	unsigned long           clkgate_delay;
+#endif
+
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_segs;	/* see blk_queue_max_segments */
@@ -523,6 +535,26 @@
 	return host->caps2 & MMC_CAP2_PACKED_WR;
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_hold(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_release(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	return host->ios.clock;
+}
+#endif
+
 static inline int mmc_card_hs(struct mmc_card *card)
 {
 	return card->host->ios.timing == MMC_TIMING_SD_HS ||
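
Callers bracket any access that needs the bus clock running with the hold/release pair; when CONFIG_MMC_CLKGATE is disabled the stubs above compile away. A minimal usage sketch (the register-access placeholder is hypothetical):

	static unsigned int example_read_with_clock(struct mmc_host *host)
	{
		unsigned int rate;

		mmc_host_clk_hold(host);	/* keep the MCI clock ungated */
		rate = mmc_host_clk_rate(host);	/* valid while the hold is active */
		/* ... touch controller registers here ... */
		mmc_host_clk_release(host);	/* allow delayed gating again */

		return rate;
	}
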
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 541b10e..f5d2f72 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -367,6 +367,7 @@
 enum gsi_xfer_elem_type {
 	GSI_XFER_ELEM_DATA,
 	GSI_XFER_ELEM_IMME_CMD,
+	GSI_XFER_ELEM_NOP,
 };
 
 /**
@@ -409,6 +410,7 @@
  *
  *		    GSI_XFER_ELEM_DATA: for all data transfers
  *		    GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
+ *		    GSI_XFER_ELEM_NOP: for event generation only
  *
  * @xfer_user_data: cookie used in xfer_cb
  *
diff --git a/include/linux/of_batterydata.h b/include/linux/of_batterydata.h
new file mode 100644
index 0000000..5505371
--- /dev/null
+++ b/include/linux/of_batterydata.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/batterydata-lib.h>
+
+#ifdef CONFIG_OF_BATTERYDATA
+/**
+ * of_batterydata_read_data() - Populate battery data from the device tree
+ * @container_node: pointer to the battery-data container device node
+ *		containing the profile nodes.
+ * @batt_data: pointer to an allocated bms_battery_data structure that the
+ *		loaded profile will be written to.
+ * @batt_id_uv: ADC voltage of the battery id line used to differentiate
+ *		between different battery profiles. If there are multiple
+ *		battery data in the device tree, the one with the closest
+ *		battery id resistance will be automatically loaded.
+ *
+ * This routine loads the closest match battery data from device tree based on
+ * the battery id reading. Then, it will try to load all the relevant data from
+ * the device tree battery data profile.
+ *
+ * If any of the lookup table pointers are NULL, this routine will skip trying
+ * to read them.
+ */
+int of_batterydata_read_data(struct device_node *container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv);
+/**
+ * of_batterydata_get_best_profile() - Find matching battery data device node
+ * @batterydata_container_node: pointer to the battery-data container device
+ *		node containing the profile nodes.
+ * @batt_id_kohm: Battery ID in KOhms for which we want to find the profile.
+ * @batt_type: Battery type which we want to force load the profile.
+ *
+ * This routine returns a device_node pointer to the closest match battery data
+ * from device tree based on the battery id reading.
+ */
+struct device_node *of_batterydata_get_best_profile(
+		struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type);
+#else
+static inline int of_batterydata_read_data(struct device_node *container_node,
+				struct bms_battery_data *batt_data,
+				int batt_id_uv)
+{
+	return -ENXIO;
+}
+static inline struct device_node *of_batterydata_get_best_profile(
+		struct device_node *batterydata_container_node,
+		int batt_id_kohm, const char *batt_type)
+{
+	return ERR_PTR(-ENXIO);
+}
+#endif /* CONFIG_OF_BATTERYDATA */
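
A charger or fuel-gauge driver would typically resolve the battery-data container from its own node and let the helper pick the profile. In the sketch below the phandle property name "qcom,battery-data" is an assumption for illustration:

	static int example_load_profile(struct device_node *node,
					struct bms_battery_data *batt_data,
					int batt_id_uv)
	{
		struct device_node *container;
		int rc;

		/* assumed phandle name, adjust to the actual DT binding */
		container = of_parse_phandle(node, "qcom,battery-data", 0);
		if (!container)
			return -EINVAL;

		rc = of_batterydata_read_data(container, batt_data, batt_id_uv);
		of_node_put(container);
		return rc;
	}
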
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 8462da2..2251428 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -84,6 +84,12 @@
 	ARMPMU_NR_ATTR_GROUPS
 };
 
+enum armpmu_pmu_states {
+	ARM_PMU_STATE_OFF,
+	ARM_PMU_STATE_RUNNING,
+	ARM_PMU_STATE_GOING_DOWN,
+};
+
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
@@ -108,6 +114,8 @@
 	void		(*free_irq)(struct arm_pmu *);
 	int		(*map_event)(struct perf_event *event);
 	int		num_events;
+	int		pmu_state;
+	int		percpu_irq;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
 	u64		max_period;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 531b8b1..3c80583 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -270,6 +270,8 @@
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
+	u32				events_across_hotplug:1,
+					reserved:31;
 
 	/* number of address filters this PMU can do */
 	unsigned int			nr_addr_filters;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 37fb247..b46d6a8 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -46,6 +46,7 @@
 	POWER_SUPPLY_CHARGE_TYPE_NONE,
 	POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
 	POWER_SUPPLY_CHARGE_TYPE_FAST,
+	POWER_SUPPLY_CHARGE_TYPE_TAPER,
 };
 
 enum {
@@ -58,6 +59,9 @@
 	POWER_SUPPLY_HEALTH_COLD,
 	POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
 	POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
+	POWER_SUPPLY_HEALTH_WARM,
+	POWER_SUPPLY_HEALTH_COOL,
+	POWER_SUPPLY_HEALTH_HOT,
 };
 
 enum {
@@ -85,6 +89,29 @@
 	POWER_SUPPLY_SCOPE_DEVICE,
 };
 
+enum {
+	POWER_SUPPLY_DP_DM_UNKNOWN = 0,
+	POWER_SUPPLY_DP_DM_PREPARE = 1,
+	POWER_SUPPLY_DP_DM_UNPREPARE = 2,
+	POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3 = 3,
+	POWER_SUPPLY_DP_DM_DP_PULSE = 4,
+	POWER_SUPPLY_DP_DM_DM_PULSE = 5,
+	POWER_SUPPLY_DP_DM_DP0P6_DMF = 6,
+	POWER_SUPPLY_DP_DM_DP0P6_DM3P3 = 7,
+	POWER_SUPPLY_DP_DM_DPF_DMF = 8,
+	POWER_SUPPLY_DP_DM_DPR_DMR = 9,
+	POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
+	POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
+	POWER_SUPPLY_DP_DM_ICL_UP = 12,
+};
+
+enum {
+	POWER_SUPPLY_PL_NONE,
+	POWER_SUPPLY_PL_USBIN_USBIN,
+	POWER_SUPPLY_PL_USBIN_USBIN_EXT,
+	POWER_SUPPLY_PL_USBMID_USBMID,
+};
+
 enum power_supply_property {
 	/* Properties of type `int' */
 	POWER_SUPPLY_PROP_STATUS = 0,
@@ -114,6 +141,8 @@
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CHARGE_EMPTY,
 	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+	POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
 	POWER_SUPPLY_PROP_CHARGE_AVG,
 	POWER_SUPPLY_PROP_CHARGE_COUNTER,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
@@ -133,6 +162,7 @@
 	POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
 	POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
 	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_CAPACITY_RAW,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TEMP_MAX,
 	POWER_SUPPLY_PROP_TEMP_MIN,
@@ -149,11 +179,51 @@
 	POWER_SUPPLY_PROP_SCOPE,
 	POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
 	POWER_SUPPLY_PROP_CALIBRATE,
-	POWER_SUPPLY_PROP_RESISTANCE,
 	/* Local extensions */
 	POWER_SUPPLY_PROP_USB_HC,
 	POWER_SUPPLY_PROP_USB_OTG,
-	POWER_SUPPLY_PROP_CHARGE_ENABLED,
+	POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
+	POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+	POWER_SUPPLY_PROP_PIN_ENABLED,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
+	POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS,
+	POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
+	POWER_SUPPLY_PROP_HI_POWER,
+	POWER_SUPPLY_PROP_LOW_POWER,
+	POWER_SUPPLY_PROP_COOL_TEMP,
+	POWER_SUPPLY_PROP_WARM_TEMP,
+	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+	POWER_SUPPLY_PROP_RESISTANCE,
+	POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
+	POWER_SUPPLY_PROP_RESISTANCE_ID, /* in Ohms */
+	POWER_SUPPLY_PROP_RESISTANCE_NOW,
+	POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+	POWER_SUPPLY_PROP_UPDATE_NOW,
+	POWER_SUPPLY_PROP_ESR_COUNT,
+	POWER_SUPPLY_PROP_BUCK_FREQ,
+	POWER_SUPPLY_PROP_BOOST_CURRENT,
+	POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+	POWER_SUPPLY_PROP_CHARGE_DONE,
+	POWER_SUPPLY_PROP_FLASH_ACTIVE,
+	POWER_SUPPLY_PROP_FLASH_TRIGGER,
+	POWER_SUPPLY_PROP_FORCE_TLIM,
+	POWER_SUPPLY_PROP_DP_DM,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CURRENT_QNOVO,
+	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
+	POWER_SUPPLY_PROP_RERUN_AICL,
+	POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+	POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+	POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+	POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
 	POWER_SUPPLY_PROP_TYPEC_MODE,
 	POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, /* 0: N/C, 1: CC1, 2: CC2 */
 	POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
@@ -162,16 +232,26 @@
 	POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
 	POWER_SUPPLY_PROP_PD_CURRENT_MAX,
 	POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+	POWER_SUPPLY_PROP_CHARGER_TEMP,
+	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
 	POWER_SUPPLY_PROP_PE_START,
 	POWER_SUPPLY_PROP_SET_SHIP_MODE,
-	POWER_SUPPLY_PROP_BOOST_CURRENT,
-	POWER_SUPPLY_PROP_FORCE_TLIM,
+	POWER_SUPPLY_PROP_SOC_REPORTING_READY,
+	POWER_SUPPLY_PROP_DEBUG_BATTERY,
+	POWER_SUPPLY_PROP_FCC_DELTA,
+	POWER_SUPPLY_PROP_ICL_REDUCTION,
+	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_DIE_HEALTH,
+	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_MANUFACTURER,
 	POWER_SUPPLY_PROP_SERIAL_NUMBER,
+	POWER_SUPPLY_PROP_BATTERY_TYPE,
 };
 
 enum power_supply_type {
@@ -183,9 +263,17 @@
 	POWER_SUPPLY_TYPE_USB_DCP,	/* Dedicated Charging Port */
 	POWER_SUPPLY_TYPE_USB_CDP,	/* Charging Downstream Port */
 	POWER_SUPPLY_TYPE_USB_ACA,	/* Accessory Charger Adapters */
-	POWER_SUPPLY_TYPE_USB_TYPE_C,	/* Type C Port */
-	POWER_SUPPLY_TYPE_USB_PD,	/* Power Delivery Port */
-	POWER_SUPPLY_TYPE_USB_PD_DRP,	/* PD Dual Role Port */
+	POWER_SUPPLY_TYPE_USB_HVDCP,	/* High Voltage DCP */
+	POWER_SUPPLY_TYPE_USB_HVDCP_3,	/* Efficient High Voltage DCP */
+	POWER_SUPPLY_TYPE_USB_PD,       /* Power Delivery */
+	POWER_SUPPLY_TYPE_WIRELESS,	/* Accessory Charger Adapters */
+	POWER_SUPPLY_TYPE_WIRELESS,	/* Wireless Charger */
+	POWER_SUPPLY_TYPE_PARALLEL,	/* Parallel Path */
+	POWER_SUPPLY_TYPE_MAIN,		/* Main Path */
+	POWER_SUPPLY_TYPE_WIPOWER,	/* Wipower */
+	POWER_SUPPLY_TYPE_TYPEC,	/* Type-C */
+	POWER_SUPPLY_TYPE_UFP,		/* Type-C UFP */
+	POWER_SUPPLY_TYPE_DFP,		/* Type-C DFP */
 };
 
 /* Indicates USB Type-C CC connection status */
@@ -359,7 +447,7 @@
 #ifdef CONFIG_POWER_SUPPLY
 extern int power_supply_is_system_supplied(void);
 #else
-static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
+static inline int power_supply_is_system_supplied(void) { return -EIO; }
 #endif
 
 extern int power_supply_get_property(struct power_supply *psy,
@@ -411,6 +499,9 @@
 	case POWER_SUPPLY_PROP_CURRENT_NOW:
 	case POWER_SUPPLY_PROP_CURRENT_AVG:
 	case POWER_SUPPLY_PROP_CURRENT_BOOT:
+	case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+	case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
 		return 1;
 	default:
 		break;
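
The new properties are read through the usual power_supply_get_property() path; an illustrative sketch, where the supply name "battery" is an assumption:

	static int example_get_charger_temp(int *temp)
	{
		struct power_supply *psy = power_supply_get_by_name("battery");
		union power_supply_propval val = {0, };
		int rc;

		if (!psy)
			return -ENODEV;

		rc = power_supply_get_property(psy,
				POWER_SUPPLY_PROP_CHARGER_TEMP, &val);
		power_supply_put(psy);
		if (!rc)
			*temp = val.intval;
		return rc;
	}
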
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 45b5f91..867de7d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3885,6 +3885,7 @@
 #define SCHED_CPUFREQ_RT	(1U << 0)
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
+#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
new file mode 100644
index 0000000..60cc768
--- /dev/null
+++ b/include/linux/sde_rsc.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_RSC_H_
+#define _SDE_RSC_H_
+
+#include <linux/kernel.h>
+
+/* primary display rsc index */
+#define SDE_RSC_INDEX		0
+
+#define MAX_RSC_CLIENT_NAME_LEN 128
+
+/**
+ * event will be triggered before sde core power collapse,
+ * mdss gdsc is still on
+ */
+#define SDE_RSC_EVENT_PRE_CORE_PC 0x1
+/**
+ * event will be triggered after sde core collapse complete,
+ * mdss gdsc is off now
+ */
+#define SDE_RSC_EVENT_POST_CORE_PC 0x2
+/**
+ * event will be triggered before restoring the sde core from power collapse,
+ * mdss gdsc is still off
+ */
+#define SDE_RSC_EVENT_PRE_CORE_RESTORE 0x4
+/**
+ * event will be triggered after restoring the sde core from power collapse,
+ * mdss gdsc is on now
+ */
+#define SDE_RSC_EVENT_POST_CORE_RESTORE 0x8
+/**
+ * event attached with solver state enabled
+ * all clients in clk_state or cmd_state
+ */
+#define SDE_RSC_EVENT_SOLVER_ENABLED 0x10
+/**
+ * event attached with solver state disabled
+ * one of the client requested for vid state
+ */
+#define SDE_RSC_EVENT_SOLVER_DISABLED 0x20
+
+/**
+ * sde_rsc_state: sde rsc state information
+ * SDE_RSC_IDLE_STATE: A client requests for idle state when there is no
+ *                    pixel or cmd transfer expected. An idle vote from
+ *                    all clients leads to the power collapse state.
+ * SDE_RSC_CLK_STATE:  A client requests for clk state when it wants to
+ *                    only avoid mode-2 entry/exit. For ex: V4L2 driver,
+ *                    sde power handle, etc.
+ * SDE_RSC_CMD_STATE:  A client requests for cmd state when it wants to
+ *                    enable the solver mode.
+ * SDE_RSC_VID_STATE:  A client requests vid state when it wants to avoid
+ *                    enabling the solver because the client is fetching
+ *                    data continuously.
+ */
+enum sde_rsc_state {
+	SDE_RSC_IDLE_STATE,
+	SDE_RSC_CLK_STATE,
+	SDE_RSC_CMD_STATE,
+	SDE_RSC_VID_STATE,
+};
+
+/**
+ * struct sde_rsc_client: stores the rsc client for sde driver
+ * @name:	name of the client
+ * @current_state:   current client state
+ * @crtc_id:		crtc_id associated with this rsc client.
+ * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @list:	list to attach client master list
+ */
+struct sde_rsc_client {
+	char name[MAX_RSC_CLIENT_NAME_LEN];
+	short current_state;
+	int crtc_id;
+	u32 rsc_index;
+	struct list_head list;
+};
+
+/**
+ * struct sde_rsc_event: local event registration entry structure
+ * @cb_func:	Pointer to desired callback function
+ * @usr:	User pointer to pass to callback on event trigger
+ * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @event_type:	refer comments in event_register
+ * @list:	list to attach event master list
+ */
+struct sde_rsc_event {
+	void (*cb_func)(uint32_t event_type, void *usr);
+	void *usr;
+	u32 rsc_index;
+	uint32_t event_type;
+	struct list_head list;
+};
+
+/**
+ * struct sde_rsc_cmd_config: provides panel configuration to rsc
+ * when client is command mode. It is not required to set it during
+ * video mode.
+ *
+ * @fps:	panel te interval
+ * @vtotal:	current vertical total (height + vbp + vfp)
+ * @jitter:	panel can set the jitter to wake up rsc/solver early.
+ *              This value causes the mdp core to exit certain modes
+ *              early. Default is 10% jitter.
+ * @prefill_lines:	max prefill lines based on panel
+ */
+struct sde_rsc_cmd_config {
+	u32 fps;
+	u32 vtotal;
+	u32 jitter;
+	u32 prefill_lines;
+};
+
+#ifdef CONFIG_DRM_SDE_RSC
+/**
+ * sde_rsc_client_create() - create the client for sde rsc.
+ * Different displays like DSI, HDMI, DP, WB, etc should call this
+ * api to register their vote for rpmh. They still need to vote for
+ * power handle to get the clocks.
+ *
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * @name:	 Caller needs to provide some valid string to identify
+ *               the client. "primary", "dp", "hdmi" are suggested names.
+ * @is_primary:	 Caller needs to provide information if client is primary
+ *               or not. Primary client votes will be redirected to
+ *               display rsc.
+ * @config:	 fps, vtotal, porches, etc configuration for command mode
+ *               panel
+ *
+ * Return: client node pointer.
+ */
+struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *name,
+		bool is_primary_display);
+
+/**
+ * sde_rsc_client_destroy() - Destroy the sde rsc client.
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ *
+ * Return: none
+ */
+void sde_rsc_client_destroy(struct sde_rsc_client *client);
+
+/**
+ * sde_rsc_client_state_update() - rsc client state update
+ * Video mode, cmd mode and clk state are supported as modes. A client needs to
+ * set this property during panel time. A switching client can set the
+ * property to change the state.
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @state:	 Client state - video/cmd
+ * @config:	 fps, vtotal, porches, etc configuration for command mode
+ *               panel
+ * @crtc_id:	 current client's crtc id
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_state_update(struct sde_rsc_client *client,
+	enum sde_rsc_state state,
+	struct sde_rsc_cmd_config *config, int crtc_id);
+
+/**
+ * sde_rsc_client_vote() - ab/ib vote from rsc client
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @ab:		 aggregated bandwidth vote from client.
+ * @ib:		 instant bandwidth vote from client.
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
+	u64 ab_vote, u64 ib_vote);
+
+/**
+ * sde_rsc_register_event - register a callback function for an event
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * @event_type:  event type to register; client sets 0x3 if it wants
+ *               to register for CORE_PC and CORE_RESTORE - both events.
+ * @cb_func:     Pointer to desired callback function
+ * @usr:         User pointer to pass to callback on event trigger
+ * Returns: sde_rsc_event pointer on success
+ */
+struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr);
+
+/**
+ * sde_rsc_unregister_event - unregister callback for an event
+ * @sde_rsc_event: event returned by sde_rsc_register_event
+ */
+void sde_rsc_unregister_event(struct sde_rsc_event *event);
+
+#else
+
+static inline struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index,
+		char *name, bool is_primary_display)
+{
+	return NULL;
+}
+
+static inline void sde_rsc_client_destroy(struct sde_rsc_client *client)
+{
+}
+
+static inline int sde_rsc_client_state_update(struct sde_rsc_client *client,
+	enum sde_rsc_state state,
+	struct sde_rsc_cmd_config *config, int crtc_id)
+{
+	return 0;
+}
+
+static inline int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
+	u64 ab_vote, u64 ib_vote)
+{
+	return 0;
+}
+
+static inline struct sde_rsc_event *sde_rsc_register_event(int rsc_index,
+		uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr)
+{
+	return NULL;
+}
+
+static inline void sde_rsc_unregister_event(struct sde_rsc_event *event)
+{
+}
+
+#endif /* CONFIG_DRM_SDE_RSC */
+
+#endif /* _SDE_RSC_H_ */
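
Putting the API together, a display driver creates one client and then moves it between states around updates; the names, crtc id and cmd_config numbers below are placeholders and error handling is simplified:

	static struct sde_rsc_client *example_rsc_setup(void)
	{
		struct sde_rsc_cmd_config cfg = {
			.fps = 60,		/* placeholder panel timing */
			.vtotal = 2000,
			.jitter = 10,
			.prefill_lines = 24,
		};
		struct sde_rsc_client *client;

		client = sde_rsc_client_create(SDE_RSC_INDEX, "primary", true);
		if (!client)
			return NULL;

		/* command-mode panel: hand the timing over and enable solver */
		sde_rsc_client_state_update(client, SDE_RSC_CMD_STATE, &cfg, 0);
		/* illustrative bandwidth vote */
		sde_rsc_client_vote(client, 1000000ULL, 1000000ULL);

		return client;
	}
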
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8d0210e..8491bdc 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -142,6 +142,8 @@
 	int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
 	int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
 	int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
+	int (*set_min_state)(struct thermal_cooling_device *, unsigned long);
+	int (*get_min_state)(struct thermal_cooling_device *, unsigned long *);
 	int (*get_requested_power)(struct thermal_cooling_device *,
 				   struct thermal_zone_device *, u32 *);
 	int (*state2power)(struct thermal_cooling_device *,
@@ -161,6 +163,8 @@
 	struct mutex lock; /* protect thermal_instances list */
 	struct list_head thermal_instances;
 	struct list_head node;
+	unsigned long sysfs_cur_state_req;
+	unsigned long sysfs_min_state_req;
 };
 
 struct thermal_attr {
@@ -260,6 +264,7 @@
 	void (*unbind_from_tz)(struct thermal_zone_device *tz);
 	int (*throttle)(struct thermal_zone_device *tz, int trip);
 	struct list_head	governor_list;
+	int min_state_throttle;
 };
 
 /* Structure that holds binding parameters for a zone */
@@ -348,6 +353,12 @@
 	 * 		Used by thermal zone drivers (default 0).
 	 */
 	int offset;
+
+	/*
+	 * @tracks_low:	Indicates that the thermal zone params are for
+	 *		temperatures falling below the thresholds.
+	 */
+	bool tracks_low;
 };
 
 struct thermal_genl_event {
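
A cooling device that honours both a ceiling and a floor request adds the two new callbacks alongside the existing ones; a hedged sketch in which the mandatory get_max_state/get_cur_state/set_cur_state callbacks are omitted for brevity:

	static int example_get_min_state(struct thermal_cooling_device *cdev,
					 unsigned long *state)
	{
		*state = 0;	/* placeholder: no floor currently requested */
		return 0;
	}

	static int example_set_min_state(struct thermal_cooling_device *cdev,
					 unsigned long state)
	{
		return 0;	/* placeholder: apply the floor limit here */
	}

	static const struct thermal_cooling_device_ops example_cooling_ops = {
		.get_min_state	= example_get_min_state,
		.set_min_state	= example_set_min_state,
	};
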
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 93094ba..1f39661 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1905,8 +1905,11 @@
 #define USB_DEVICE_REMOVE	0x0002
 #define USB_BUS_ADD		0x0003
 #define USB_BUS_REMOVE		0x0004
+#define USB_BUS_DIED		0x0005
 extern void usb_register_notify(struct notifier_block *nb);
 extern void usb_unregister_notify(struct notifier_block *nb);
+extern void usb_register_atomic_notify(struct notifier_block *nb);
+extern void usb_unregister_atomic_notify(struct notifier_block *nb);
 
 /* debugfs stuff */
 extern struct dentry *usb_debug_root;
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 1d0043d..de2a722 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -50,4 +50,10 @@
 /* device can't handle Link Power Management */
 #define USB_QUIRK_NO_LPM			BIT(10)
 
+/*
+ * Device reports its bInterval as linear frames instead of the
+ * USB 2.0 calculation.
+ */
+#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL	BIT(11)
+
 #endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 4d1c46a..c7b1dc7 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@
 	struct iscsi_task	*task;		/* xmit task in progress */
 
 	/* xmit */
+	spinlock_t		taskqueuelock;  /* protects the next three lists */
 	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
 	struct list_head	cmdqueue;	/* data-path cmd queue */
 	struct list_head	requeue;	/* tasks needing another run */
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index d7cd961..693fceb 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -7,7 +7,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -60,6 +60,44 @@
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 539b25a..0ee910d 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -96,6 +96,27 @@
 		__entry->dst_nid,
 		__entry->nr_pages)
 );
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode	= mode;
+		__entry->reason	= reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		__print_symbolic(__entry->mode, MIGRATE_MODE),
+		__print_symbolic(__entry->reason, MIGRATE_REASON))
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/rpmh.h b/include/trace/events/rpmh.h
index 62e7216..919877d 100644
--- a/include/trace/events/rpmh.h
+++ b/include/trace/events/rpmh.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,86 +20,89 @@
 
 DECLARE_EVENT_CLASS(rpmh_ack_recvd,
 
-	TP_PROTO(int m, u32 addr, int errno),
+	TP_PROTO(const char *s, int m, u32 addr, int errno),
 
-	TP_ARGS(m, addr, errno),
+	TP_ARGS(s, m, addr, errno),
 
 	TP_STRUCT__entry(
+		__field(const char *, name)
 		__field(int, m)
 		__field(u32, addr)
 		__field(int, errno)
 	),
 
 	TP_fast_assign(
+		__entry->name = s;
 		__entry->m = m;
 		__entry->addr = addr;
 		__entry->errno = errno;
 	),
 
-	TP_printk("ack: tcs-m:%d addr: 0x%08x errno: %d",
-			__entry->m, __entry->addr, __entry->errno)
+	TP_printk("%s: ack: tcs-m:%d addr: 0x%08x errno: %d",
+			__entry->name, __entry->m, __entry->addr, __entry->errno)
 );
 
 DEFINE_EVENT(rpmh_ack_recvd, rpmh_notify_irq,
-	TP_PROTO(int m, u32 addr, int err),
-	TP_ARGS(m, addr, err)
+	TP_PROTO(const char *s, int m, u32 addr, int err),
+	TP_ARGS(s, m, addr, err)
 );
 
 DEFINE_EVENT(rpmh_ack_recvd, rpmh_notify,
-	TP_PROTO(int m, u32 addr, int err),
-	TP_ARGS(m, addr, err)
+	TP_PROTO(const char *s, int m, u32 addr, int err),
+	TP_ARGS(s, m, addr, err)
 );
 
 TRACE_EVENT(rpmh_send_msg,
 
-	TP_PROTO(void *b, int m, int n, u32 h, u32 a, u32 v, bool c),
+	TP_PROTO(const char *s, int m, int n, u32 h, u32 a, u32 v, bool c, bool t),
 
-	TP_ARGS(b, m, n, h, a, v, c),
+	TP_ARGS(s, m, n, h, a, v, c, t),
 
 	TP_STRUCT__entry(
-		__field(void *, base)
+		__field(const char *, name)
 		__field(int, m)
 		__field(int, n)
 		__field(u32, hdr)
 		__field(u32, addr)
 		__field(u32, data)
 		__field(bool, complete)
+		__field(bool, trigger)
 	),
 
 	TP_fast_assign(
-		__entry->base = b;
+		__entry->name = s;
 		__entry->m = m;
 		__entry->n = n;
 		__entry->hdr = h;
 		__entry->addr = a;
 		__entry->data = v;
 		__entry->complete = c;
+		__entry->trigger = t;
 	),
 
-	TP_printk("msg: base: 0x%p  tcs(m): %d cmd(n): %d msgid: 0x%08x addr: 0x%08x data: 0x%08x complete: %d",
-			__entry->base + (672 * __entry->m) + (20 * __entry->n),
-			__entry->m, __entry->n, __entry->hdr,
-			__entry->addr, __entry->data, __entry->complete)
+	TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: 0x%08x addr: 0x%08x data: 0x%08x complete: %d trigger: %d",
+			__entry->name, __entry->m, __entry->n, __entry->hdr,
+			__entry->addr, __entry->data, __entry->complete, __entry->trigger)
 );
 
 TRACE_EVENT(rpmh_control_msg,
 
-	TP_PROTO(void *r, u32 v),
+	TP_PROTO(const char *s, u32 v),
 
-	TP_ARGS(r, v),
+	TP_ARGS(s, v),
 
 	TP_STRUCT__entry(
-		__field(void *, reg)
+		__field(const char *, name)
 		__field(u32, data)
 	),
 
 	TP_fast_assign(
-		__entry->reg = r;
+		__entry->name = s;
 		__entry->data = v;
 	),
 
-	TP_printk("ctrl-msg: reg: 0x%p data: 0x%08x",
-			__entry->reg, __entry->data)
+	TP_printk("%s: ctrl-msg: data: 0x%08x",
+			__entry->name, __entry->data)
 );
 
 #endif /* _TRACE_RPMH_H */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 4823794..33ba430 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -428,6 +428,7 @@
 header-y += snmp.h
 header-y += sock_diag.h
 header-y += socket.h
+header-y += sockev.h
 header-y += sockios.h
 header-y += sonet.h
 header-y += sonypi.h
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index 1b17e1c..4201c95 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -5,11 +5,11 @@
 
 #define ESOC_CODE		0xCC
 
-#define ESOC_CMD_EXE		_IOW(ESOC_CODE, 1, __u32)
-#define ESOC_WAIT_FOR_REQ	_IOR(ESOC_CODE, 2, __u32)
-#define ESOC_NOTIFY		_IOW(ESOC_CODE, 3, __u32)
-#define ESOC_GET_STATUS		_IOR(ESOC_CODE, 4, __u32)
-#define ESOC_WAIT_FOR_CRASH	_IOR(ESOC_CODE, 6, __u32)
+#define ESOC_CMD_EXE		_IOW(ESOC_CODE, 1, unsigned int)
+#define ESOC_WAIT_FOR_REQ	_IOR(ESOC_CODE, 2, unsigned int)
+#define ESOC_NOTIFY		_IOW(ESOC_CODE, 3, unsigned int)
+#define ESOC_GET_STATUS		_IOR(ESOC_CODE, 4, unsigned int)
+#define ESOC_WAIT_FOR_CRASH	_IOR(ESOC_CODE, 6, unsigned int)
 #define ESOC_REG_REQ_ENG	_IO(ESOC_CODE, 7)
 #define ESOC_REG_CMD_ENG	_IO(ESOC_CODE, 8)
 
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 1049c78..c462f1d 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -179,6 +179,12 @@
 	DEVCONF_DROP_UNSOLICITED_NA,
 	DEVCONF_KEEP_ADDR_ON_DOWN,
 	DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
+	DEVCONF_SEG6_ENABLED,
+	DEVCONF_SEG6_REQUIRE_HMAC,
+	DEVCONF_ENHANCED_DAD,
+	DEVCONF_ADDR_GEN_MODE,
+	DEVCONF_DISABLE_POLICY,
+	DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
 	DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 941a816..c190446 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -327,6 +327,7 @@
 #define KGSL_PROP_DEVICE_QDSS_STM	0x19
 #define KGSL_PROP_MIN_ACCESS_LENGTH	0x1A
 #define KGSL_PROP_UBWC_MODE		0x1B
+#define KGSL_PROP_DEVICE_QTIMER		0x20
 
 struct kgsl_shadowprop {
 	unsigned long gpuaddr;
@@ -339,6 +340,11 @@
 	uint64_t size;
 };
 
+struct kgsl_qtimer_prop {
+	uint64_t gpuaddr;
+	uint64_t size;
+};
+
 struct kgsl_version {
 	unsigned int drv_major;
 	unsigned int drv_minor;
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 0dba4e4..2817ca1 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -27,7 +27,7 @@
 #define NETLINK_ECRYPTFS	19
 #define NETLINK_RDMA		20
 #define NETLINK_CRYPTO		21	/* Crypto layer */
-
+#define NETLINK_SOCKEV          22      /* Socket Administrative Events */
 #define NETLINK_INET_DIAG	NETLINK_SOCK_DIAG
 
 #define MAX_LINKS 32		
diff --git a/include/uapi/linux/sockev.h b/include/uapi/linux/sockev.h
new file mode 100644
index 0000000..b274fbc
--- /dev/null
+++ b/include/uapi/linux/sockev.h
@@ -0,0 +1,31 @@
+#ifndef _SOCKEV_H_
+#define _SOCKEV_H_
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+#include <linux/socket.h>
+
+enum sknetlink_groups {
+	SKNLGRP_UNICAST,
+	SKNLGRP_SOCKEV,
+	__SKNLGRP_MAX
+};
+
+#define SOCKEV_STR_MAX 32
+
+/********************************************************************
+ *		Socket operation messages
+ ****/
+
+struct sknlsockevmsg {
+	__u8 event[SOCKEV_STR_MAX];
+	__u32 pid; /* (struct task_struct*)->pid */
+	__u16 skfamily; /* (struct socket*)->sk->sk_family */
+	__u8 skstate; /* (struct socket*)->sk->sk_state */
+	__u8 skprotocol; /* (struct socket*)->sk->sk_protocol */
+	__u16 sktype; /* (struct socket*)->sk->sk_type */
+	__u64 skflags; /* (struct socket*)->sk->sk_flags */
+};
+
+#endif /* _SOCKEV_H_ */
+
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index e854785..08aa800 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -570,6 +570,7 @@
 	NET_IPV6_PROXY_NDP=23,
 	NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
 	NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
+	NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
 	__NET_IPV6_MAX
 };
 
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 37c6c00..a62870e 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -429,6 +429,9 @@
 	V4L2_MPEG_VIDEO_H264_LEVEL_5_0	= 14,
 	V4L2_MPEG_VIDEO_H264_LEVEL_5_1	= 15,
 	V4L2_MPEG_VIDEO_H264_LEVEL_5_2	= 16,
+#define V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN \
+	V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN
+	V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN = 17,
 };
 #define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA	(V4L2_CID_MPEG_BASE+360)
 #define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA	(V4L2_CID_MPEG_BASE+361)
@@ -919,6 +922,9 @@
 	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_1	= 23,
 	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_MAIN_TIER_LEVEL_6_2	= 24,
 	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_HIGH_TIER_LEVEL_6_2	= 25,
+#define V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN \
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN
+	V4L2_MPEG_VIDC_VIDEO_HEVC_LEVEL_UNKNOWN = 26,
 };
 
 #define V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS \
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index fd379ec..86cb858 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -539,6 +539,7 @@
 #define V4L2_PIX_FMT_NV12_UBWC        v4l2_fourcc('Q', '1', '2', '8')
 /* UBWC 10-bit Y/CbCr 4:2:0 */
 #define V4L2_PIX_FMT_NV12_TP10_UBWC   v4l2_fourcc('Q', '1', '2', 'A')
+#define V4L2_PIX_FMT_NV12_P010_UBWC   v4l2_fourcc('Q', '1', '2', 'B')
 
 /* two non contiguous planes - one Y, one Cr + Cb interleaved  */
 #define V4L2_PIX_FMT_NV12M   v4l2_fourcc('N', 'M', '1', '2') /* 12  Y/CbCr 4:2:0  */
diff --git a/include/uapi/media/msm_sde_rotator.h b/include/uapi/media/msm_sde_rotator.h
index 790135a..212eb26 100644
--- a/include/uapi/media/msm_sde_rotator.h
+++ b/include/uapi/media/msm_sde_rotator.h
@@ -63,6 +63,7 @@
 #define SDE_PIX_FMT_Y_CBCR_H2V2_P010	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010
 #define SDE_PIX_FMT_Y_CBCR_H2V2_TP10	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10
 #define SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC	V4L2_PIX_FMT_NV12_TP10_UBWC
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC	V4L2_PIX_FMT_NV12_P010_UBWC
 
 /*
  * struct msm_sde_rotator_fence - v4l2 buffer fence info
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index 2bd6737..a57242e 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -229,7 +229,7 @@
 		/* Only log the first time events_limit is incremented. */
 		if (atomic64_inc_return(&pids->events_limit) == 1) {
 			pr_info("cgroup: fork rejected by pids controller in ");
-			pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+			pr_cont_cgroup_path(css->cgroup);
 			pr_cont("\n");
 		}
 		cgroup_file_notify(&pids->events_file);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 99c91f6..87b9cd9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7586,6 +7586,7 @@
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+	.events_across_hotplug = 1,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -7730,6 +7731,7 @@
 	.start		= perf_swevent_start,
 	.stop		= perf_swevent_stop,
 	.read		= perf_swevent_read,
+	.events_across_hotplug = 1,
 };
 
 static inline void perf_tp_register(void)
@@ -8460,6 +8462,7 @@
 	.start		= cpu_clock_event_start,
 	.stop		= cpu_clock_event_stop,
 	.read		= cpu_clock_event_read,
+	.events_across_hotplug = 1,
 };
 
 /*
@@ -8541,6 +8544,7 @@
 	.start		= task_clock_event_start,
 	.stop		= task_clock_event_stop,
 	.read		= task_clock_event_read,
+	.events_across_hotplug = 1,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -10342,6 +10346,17 @@
 			continue;
 
 		mutex_lock(&ctx->mutex);
+		raw_spin_lock_irq(&ctx->lock);
+		/*
+		 * Destroy the task <-> ctx relation and mark the context dead.
+		 *
+		 * This is important because even though the task hasn't been
+		 * exposed yet the context has been (through child_list).
+		 */
+		RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+		WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+		put_task_struct(task); /* cannot be last */
+		raw_spin_unlock_irq(&ctx->lock);
 again:
 		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
 				group_entry)
@@ -10595,7 +10610,7 @@
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	/*
@@ -10611,7 +10626,7 @@
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
 		if (ret)
-			break;
+			goto out_unlock;
 	}
 
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10639,6 +10654,7 @@
 	}
 
 	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
@@ -10703,6 +10719,76 @@
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+static void
+check_hotplug_start_event(struct perf_event *event)
+{
+	if (event->attr.type == PERF_TYPE_SOFTWARE) {
+		switch (event->attr.config) {
+		case PERF_COUNT_SW_CPU_CLOCK:
+			cpu_clock_event_start(event, 0);
+			break;
+		case PERF_COUNT_SW_TASK_CLOCK:
+			break;
+		default:
+			if (event->pmu->start)
+				event->pmu->start(event, 0);
+			break;
+		}
+	}
+}
+
+static int perf_event_start_swevents(unsigned int cpu)
+{
+	struct perf_event_context *ctx;
+	struct pmu *pmu;
+	struct perf_event *event;
+	int idx;
+
+	idx = srcu_read_lock(&pmus_srcu);
+	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+		mutex_lock(&ctx->mutex);
+		raw_spin_lock(&ctx->lock);
+		list_for_each_entry(event, &ctx->event_list, event_entry)
+			check_hotplug_start_event(event);
+		raw_spin_unlock(&ctx->lock);
+		mutex_unlock(&ctx->mutex);
+	}
+	srcu_read_unlock(&pmus_srcu, idx);
+	return 0;
+}
+
+/*
+ * If keeping events across hotplug is supported, do not remove the
+ * event from the context's event list, so the event lives beyond the
+ * CPU hotplug. The context is exited via the fd close path when
+ * userspace is done and the target CPU is online. If a software clock
+ * event is active, stop the hrtimer associated with it here and
+ * restart it when the CPU comes back online.
+ */
+static void
+check_hotplug_remove_from_context(struct perf_event *event,
+			   struct perf_cpu_context *cpuctx,
+			   struct perf_event_context *ctx)
+{
+	if (!event->pmu->events_across_hotplug) {
+		__perf_remove_from_context(event, cpuctx,
+			ctx, (void *)DETACH_GROUP);
+	} else if (event->attr.type == PERF_TYPE_SOFTWARE) {
+		switch (event->attr.config) {
+		case PERF_COUNT_SW_CPU_CLOCK:
+			cpu_clock_event_stop(event, 0);
+			break;
+		case PERF_COUNT_SW_TASK_CLOCK:
+			break;
+		default:
+			if (event->pmu->stop)
+				event->pmu->stop(event, 0);
+			break;
+		}
+	}
+}
+
 static void __perf_event_exit_context(void *__info)
 {
 	struct perf_event_context *ctx = __info;
@@ -10711,7 +10797,7 @@
 
 	raw_spin_lock(&ctx->lock);
 	list_for_each_entry(event, &ctx->event_list, event_entry)
-		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
+		check_hotplug_remove_from_context(event, cpuctx, ctx);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -10830,6 +10916,21 @@
 }
 device_initcall(perf_event_sysfs_init);
 
+static int perf_cpu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
+				"PERF/CORE/AP_PERF_ONLINE",
+				perf_event_start_swevents,
+				perf_event_exit_cpu);
+	if (ret)
+		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
+		       ret);
+	return ret;
+}
+subsys_initcall(perf_cpu_hp_init);
+
 #ifdef CONFIG_CGROUP_PERF
 static struct cgroup_subsys_state *
 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
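The events_across_hotplug additions above keep per-CPU software events (CPU-clock, task-clock and the other software PMUs that set the flag) registered while a CPU goes offline, only stopping and restarting their hrtimers around the hotplug transition. A minimal user-space sketch of the kind of event this affects is shown below: it opens a PERF_COUNT_SW_CPU_CLOCK counter pinned to one CPU via perf_event_open(2). The CPU number and the privileges needed for a system-wide, per-CPU counter (perf_event_paranoid / CAP_PERFMON) are assumptions about the target system, not something this patch provides.

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu,
			       group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_SOFTWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_SW_CPU_CLOCK;

		/* system-wide software counter pinned to CPU 2 (illustrative) */
		fd = perf_event_open(&attr, -1, 2, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		sleep(1);
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("cpu-clock: %llu ns\n",
			       (unsigned long long)count);
		close(fd);
		return 0;
	}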
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe084ef..f7f5256 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2172,6 +2172,7 @@
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+	cpufreq_update_util(rq, 0);
 	raw_spin_unlock(&rq->lock);
 
 	rcu_read_lock();
@@ -2264,6 +2265,7 @@
 
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 		note_task_waking(p, wallclock);
 	}
@@ -3370,6 +3372,8 @@
 
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+
+	cpufreq_update_util(rq, 0);
 	early_notif = early_detection_notify(rq, wallclock);
 
 	raw_spin_unlock(&rq->lock);
@@ -3704,6 +3708,7 @@
 	if (likely(prev != next)) {
 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		if (!is_idle_task(prev) && !prev->on_rq)
 			update_avg_burst(prev);
 
@@ -3717,6 +3722,7 @@
 		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
 		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock_irq(&rq->lock);
 	}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 124eb6a..0085f66 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1770,12 +1770,11 @@
 #ifdef CONFIG_SMP
 		if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded)
 			queue_push_tasks(rq);
-#else
+#endif
 		if (dl_task(rq->curr))
 			check_preempt_curr_dl(rq, p, 0);
 		else
 			resched_curr(rq);
-#endif
 	}
 }
 
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index c0adf4e..4de373f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -3180,6 +3180,13 @@
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
 				 0);
 
+		/*
+		 * Ensure that we don't report load for 'cpu' again via the
+		 * cpufreq_update_util path in the window that started at
+		 * rq->window_start
+		 */
+		rq->load_reported_window = rq->window_start;
+
 		account_load_subtractions(rq);
 		load[i] = rq->prev_runnable_sum;
 		nload[i] = rq->nt_prev_runnable_sum;
@@ -3610,6 +3617,11 @@
 
 	migrate_top_tasks(p, src_rq, dest_rq);
 
+	if (!same_freq_domain(new_cpu, task_cpu(p))) {
+		cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+		cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+	}
+
 	if (p == src_rq->ed_task) {
 		src_rq->ed_task = NULL;
 		if (!dest_rq->ed_task)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index bcac711..709f719 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2410,10 +2410,9 @@
 #ifdef CONFIG_SMP
 		if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded)
 			queue_push_tasks(rq);
-#else
+#endif /* CONFIG_SMP */
 		if (p->prio < rq->curr->prio)
 			resched_curr(rq);
-#endif /* CONFIG_SMP */
 	}
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e7f6794..5e25011 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -79,6 +79,7 @@
 	u64 time;
 };
 
+extern unsigned int sched_disable_window_stats;
 #endif /* CONFIG_SCHED_HMP */
 
 
@@ -770,6 +771,7 @@
 
 	int cstate, wakeup_latency, wakeup_energy;
 	u64 window_start;
+	u64 load_reported_window;
 	unsigned long hmp_flags;
 
 	u64 cur_irqload;
@@ -2142,6 +2144,18 @@
 {
 	struct update_util_data *data;
 
+#ifdef CONFIG_SCHED_HMP
+	/*
+	 * Skip if we've already reported, but not if this is an inter-cluster
+	 * migration
+	 */
+	if (!sched_disable_window_stats &&
+		(rq->load_reported_window == rq->window_start) &&
+		!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+		return;
+	rq->load_reported_window = rq->window_start;
+#endif
+
 	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
 	if (data)
 		data->func(data, rq_clock(rq), flags);
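The load_reported_window check added here rate-limits governor callbacks to one per WALT window, unless the caller forces an update with SCHED_CPUFREQ_INTERCLUSTER_MIG. The following stand-alone model of that dedup logic uses illustrative names rather than the kernel's and omits the sched_disable_window_stats guard; it is a sketch of the idea, not the scheduler code.

	#include <stdbool.h>
	#include <stdint.h>

	struct cpu_load_state {
		uint64_t window_start;         /* start of the current window */
		uint64_t load_reported_window; /* window we last reported in */
	};

	/*
	 * Report at most once per window; a forced update (the analogue of
	 * an inter-cluster migration) always goes through and refreshes
	 * the bookkeeping.
	 */
	static bool should_report_load(struct cpu_load_state *cs, bool forced)
	{
		if (!forced && cs->load_reported_window == cs->window_start)
			return false;
		cs->load_reported_window = cs->window_start;
		return true;
	}

	int main(void)
	{
		struct cpu_load_state cs = { .window_start = 100 };

		/* first call in the window reports, the repeat is suppressed */
		return (should_report_load(&cs, false) &&
			!should_report_load(&cs, false)) ? 0 : 1;
	}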
diff --git a/lib/Makefile b/lib/Makefile
index e0eb131..6bde16d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -31,6 +31,8 @@
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
 
+KASAN_SANITIZE_find_bit.o := n
+
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
diff --git a/mm/cma.c b/mm/cma.c
index c960459..0306bab 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -134,6 +134,10 @@
 	spin_lock_init(&cma->mem_head_lock);
 #endif
 
+	if (!PageHighMem(pfn_to_page(cma->base_pfn)))
+		kmemleak_free_part(__va(cma->base_pfn << PAGE_SHIFT),
+				cma->count << PAGE_SHIFT);
+
 	return 0;
 
 err:
@@ -380,6 +384,8 @@
 	if (!count)
 		return NULL;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -420,6 +426,8 @@
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
diff --git a/mm/memory.c b/mm/memory.c
index cbb1e5e..91e1653 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3050,7 +3050,7 @@
 }
 
 static unsigned long fault_around_bytes __read_mostly =
-	rounddown_pow_of_two(65536);
+	rounddown_pow_of_two(4096);
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
diff --git a/mm/migrate.c b/mm/migrate.c
index 66ce6b4..f49de3cf 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1319,6 +1319,8 @@
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
diff --git a/mm/percpu.c b/mm/percpu.c
index 2557143..f014ceb 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1010,8 +1010,11 @@
 		mutex_unlock(&pcpu_alloc_mutex);
 	}
 
-	if (chunk != pcpu_reserved_chunk)
+	if (chunk != pcpu_reserved_chunk) {
+		spin_lock_irqsave(&pcpu_lock, flags);
 		pcpu_nr_empty_pop_pages -= occ_pages;
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+	}
 
 	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
 		pcpu_schedule_balance_work();
diff --git a/mm/slub.c b/mm/slub.c
index 2b01429..30be24b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1673,6 +1673,7 @@
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	memcg_uncharge_slab(page, order, s);
+	kasan_alloc_pages(page, order);
 	__free_pages(page, order);
 }
 
@@ -3881,6 +3882,7 @@
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(x);
+		kasan_alloc_pages(page, compound_order(page));
 		__free_pages(page, compound_order(page));
 		return;
 	}
diff --git a/net/Kconfig b/net/Kconfig
index cd20118..d5ff4f7 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -321,6 +321,15 @@
 	  with many clients some protection against DoS by a single (spoofed)
 	  flow that greatly exceeds average workload.
 
+config SOCKEV_NLMCAST
+	bool "Enable SOCKEV Netlink Multicast"
+	default n
+	---help---
+	  Default client for SOCKEV notifier events. Sends multicast netlink
+	  messages whenever the socket event notifier is invoked. Enable if
+	  user space entities need to be notified of socket events without
+	  having to poll /proc
+
 menu "Network testing"
 
 config NET_PKTGEN
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d243688..d3f6c26 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1334,7 +1334,6 @@
 		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
 		    (xorstate & CEPH_OSD_EXISTS)) {
 			pr_info("osd%d does not exist\n", osd);
-			map->osd_weight[osd] = CEPH_OSD_IN;
 			ret = set_primary_affinity(map, osd,
 						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
 			if (ret)
diff --git a/net/core/Makefile b/net/core/Makefile
index d6508c2..77bb89b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -24,6 +24,7 @@
 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
 obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
+obj-$(CONFIG_SOCKEV_NLMCAST) += sockev_nlmcast.o
 obj-$(CONFIG_DST_CACHE) += dst_cache.o
 obj-$(CONFIG_HWBM) += hwbm.o
 obj-$(CONFIG_NET_DEVLINK) += devlink.o
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 11fce17..46e8830 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -69,27 +69,17 @@
 	return 0;
 }
 
-static void update_classid(struct cgroup_subsys_state *css, void *v)
-{
-	struct css_task_iter it;
-	struct task_struct *p;
-
-	css_task_iter_start(css, &it);
-	while ((p = css_task_iter_next(&it))) {
-		task_lock(p);
-		iterate_fd(p->files, 0, update_classid_sock, v);
-		task_unlock(p);
-	}
-	css_task_iter_end(&it);
-}
-
 static void cgrp_attach(struct cgroup_taskset *tset)
 {
 	struct cgroup_subsys_state *css;
+	struct task_struct *p;
 
-	cgroup_taskset_first(tset, &css);
-	update_classid(css,
-		       (void *)(unsigned long)css_cls_state(css)->classid);
+	cgroup_taskset_for_each(p, css, tset) {
+		task_lock(p);
+		iterate_fd(p->files, 0, update_classid_sock,
+			   (void *)(unsigned long)css_cls_state(css)->classid);
+		task_unlock(p);
+	}
 }
 
 static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -101,12 +91,22 @@
 			 u64 value)
 {
 	struct cgroup_cls_state *cs = css_cls_state(css);
+	struct css_task_iter it;
+	struct task_struct *p;
 
 	cgroup_sk_alloc_disable();
 
 	cs->classid = (u32)value;
 
-	update_classid(css, (void *)(unsigned long)cs->classid);
+	css_task_iter_start(css, &it);
+	while ((p = css_task_iter_next(&it))) {
+		task_lock(p);
+		iterate_fd(p->files, 0, update_classid_sock,
+			   (void *)(unsigned long)cs->classid);
+		task_unlock(p);
+	}
+	css_task_iter_end(&it);
+
 	return 0;
 }
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 87a740b..19562f7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1437,6 +1437,11 @@
 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
 			 __func__, atomic_read(&sk->sk_omem_alloc));
 
+	if (sk->sk_frag.page) {
+		put_page(sk->sk_frag.page);
+		sk->sk_frag.page = NULL;
+	}
+
 	if (sk->sk_peer_cred)
 		put_cred(sk->sk_peer_cred);
 	put_pid(sk->sk_peer_pid);
@@ -1533,6 +1538,12 @@
 			is_charged = sk_filter_charge(newsk, filter);
 
 		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+			/* We need to make sure that we don't uncharge the new
+			 * socket if we couldn't charge it in the first place
+			 * as otherwise we uncharge the parent's filter.
+			 */
+			if (!is_charged)
+				RCU_INIT_POINTER(newsk->sk_filter, NULL);
 			/* It is still raw copy of parent, so invalidate
 			 * destructor and make plain sk_free() */
 			newsk->sk_destruct = NULL;
@@ -2741,11 +2752,6 @@
 
 	sk_refcnt_debug_release(sk);
 
-	if (sk->sk_frag.page) {
-		put_page(sk->sk_frag.page);
-		sk->sk_frag.page = NULL;
-	}
-
 	sock_put(sk);
 }
 EXPORT_SYMBOL(sk_common_release);
diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c
new file mode 100644
index 0000000..04f61fc
--- /dev/null
+++ b/net/core/sockev_nlmcast.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2014-2015, 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Default SOCKEV client implementation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/netlink.h>
+#include <linux/sockev.h>
+#include <net/sock.h>
+
+static int registration_status;
+static struct sock *socknlmsgsk;
+
+static void sockev_skmsg_recv(struct sk_buff *skb)
+{
+	pr_debug("%s(): Got unsolicited request\n", __func__);
+}
+
+static struct netlink_kernel_cfg nlcfg = {
+	.input = sockev_skmsg_recv
+};
+
+static void _sockev_event(unsigned long event, __u8 *evstr, int buflen)
+{
+	switch (event) {
+	case SOCKEV_SOCKET:
+		strlcpy(evstr, "SOCKEV_SOCKET", buflen);
+		break;
+	case SOCKEV_BIND:
+		strlcpy(evstr, "SOCKEV_BIND", buflen);
+		break;
+	case SOCKEV_LISTEN:
+		strlcpy(evstr, "SOCKEV_LISTEN", buflen);
+		break;
+	case SOCKEV_ACCEPT:
+		strlcpy(evstr, "SOCKEV_ACCEPT", buflen);
+		break;
+	case SOCKEV_CONNECT:
+		strlcpy(evstr, "SOCKEV_CONNECT", buflen);
+		break;
+	case SOCKEV_SHUTDOWN:
+		strlcpy(evstr, "SOCKEV_SHUTDOWN", buflen);
+		break;
+	default:
+		strlcpy(evstr, "UNKNOWN", buflen);
+	}
+}
+
+static int sockev_client_cb(struct notifier_block *nb,
+			    unsigned long event, void *data)
+{
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	struct sknlsockevmsg *smsg;
+	struct socket *sock;
+
+	sock = (struct socket *)data;
+	if (socknlmsgsk == 0)
+		goto done;
+	if ((!socknlmsgsk) || (!sock) || (!sock->sk))
+		goto done;
+
+	if (sock->sk->sk_family != AF_INET && sock->sk->sk_family != AF_INET6)
+		goto done;
+
+	if (event != SOCKEV_BIND && event != SOCKEV_LISTEN)
+		goto done;
+
+	skb = nlmsg_new(sizeof(struct sknlsockevmsg), GFP_KERNEL);
+	if (!skb)
+		goto done;
+
+	nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct sknlsockevmsg), 0);
+	if (!nlh) {
+		kfree_skb(skb);
+		goto done;
+	}
+
+	NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV;
+
+	smsg = nlmsg_data(nlh);
+	smsg->pid = current->pid;
+	_sockev_event(event, smsg->event, sizeof(smsg->event));
+	smsg->skfamily = sock->sk->sk_family;
+	smsg->skstate = sock->sk->sk_state;
+	smsg->skprotocol = sock->sk->sk_protocol;
+	smsg->sktype = sock->sk->sk_type;
+	smsg->skflags = sock->sk->sk_flags;
+
+	nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
+done:
+	return 0;
+}
+
+static struct notifier_block sockev_notifier_client = {
+	.notifier_call = sockev_client_cb,
+	.next = 0,
+	.priority = 0
+};
+
+/* ***************** Startup/Shutdown *************************************** */
+
+static int __init sockev_client_init(void)
+{
+	int rc;
+
+	registration_status = 1;
+	rc = sockev_register_notify(&sockev_notifier_client);
+	if (rc != 0) {
+		registration_status = 0;
+		pr_err("%s(): Failed to register cb (%d)\n", __func__, rc);
+	}
+	socknlmsgsk = netlink_kernel_create(&init_net, NETLINK_SOCKEV, &nlcfg);
+	if (!socknlmsgsk) {
+		pr_err("%s(): Failed to initialize netlink socket\n", __func__);
+		if (registration_status)
+			sockev_unregister_notify(&sockev_notifier_client);
+		registration_status = 0;
+	}
+
+	return rc;
+}
+
+static void __exit sockev_client_exit(void)
+{
+	if (registration_status)
+		sockev_unregister_notify(&sockev_notifier_client);
+}
+
+module_init(sockev_client_init)
+module_exit(sockev_client_exit)
+MODULE_LICENSE("GPL v2");
+
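For completeness, a user-space counterpart to the client above would open a NETLINK_SOCKEV socket, join the SKNLGRP_SOCKEV multicast group and parse the struct sknlsockevmsg payloads; as the code above shows, only SOCKEV_BIND and SOCKEV_LISTEN events for AF_INET/AF_INET6 sockets are multicast. The sketch below assumes that NETLINK_SOCKEV is exported by this kernel's <linux/netlink.h> and that SKNLGRP_SOCKEV (== 1) maps to bit 0 of nl_groups; both are assumptions about the target headers rather than something this patch guarantees.

	#include <linux/netlink.h>
	#include <linux/sockev.h>
	#include <sys/socket.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_nl addr;
		char buf[512];
		int fd;

		fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);
		if (fd < 0) {
			perror("socket");
			return 1;
		}

		memset(&addr, 0, sizeof(addr));
		addr.nl_family = AF_NETLINK;
		addr.nl_groups = 1 << (SKNLGRP_SOCKEV - 1);
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			perror("bind");
			return 1;
		}

		for (;;) {
			ssize_t len = recv(fd, buf, sizeof(buf), 0);
			struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

			if (len <= 0)
				break;
			for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
				struct sknlsockevmsg *msg = NLMSG_DATA(nlh);

				printf("%s pid=%u family=%u\n", msg->event,
				       msg->pid, msg->skfamily);
			}
		}
		close(fd);
		return 0;
	}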
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index cdf372f..e057887 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -2798,6 +2798,9 @@
 	if (!port_ptr || !name)
 		return -EINVAL;
 
+	if (port_ptr->type != CLIENT_PORT)
+		return -EINVAL;
+
 	if (name->addrtype != MSM_IPC_ADDR_NAME)
 		return -EINVAL;
 
diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c
index a84fc11..02242a1 100644
--- a/net/ipc_router/ipc_router_socket.c
+++ b/net/ipc_router/ipc_router_socket.c
@@ -543,10 +543,18 @@
 static int msm_ipc_router_close(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	struct msm_ipc_port *port_ptr;
 	int ret;
 
+	if (!sk)
+		return -EINVAL;
+
 	lock_sock(sk);
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr) {
+		release_sock(sk);
+		return -EINVAL;
+	}
 	ret = msm_ipc_router_close_port(port_ptr);
 	msm_ipc_unload_default_node(msm_ipc_sk(sk)->default_node_vote_info);
 	release_sock(sk);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 18412f9..98fd2f7 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1082,7 +1082,8 @@
 
 	net = sock_net(skb->sk);
 	nlh = nlmsg_hdr(skb);
-	if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+	    skb->len < nlh->nlmsg_len ||
 	    nlmsg_len(nlh) < sizeof(*frn))
 		return;
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 723059a..e074816 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5572,6 +5572,7 @@
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	tcp_set_state(sk, TCP_ESTABLISHED);
+	icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
 	if (skb) {
 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5790,7 +5791,6 @@
 			 * to stand against the temptation 8)     --ANK
 			 */
 			inet_csk_schedule_ack(sk);
-			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
 			tcp_enter_quickack_mode(sk);
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 						  TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6234eba..8615a6b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -466,6 +466,7 @@
 		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
 		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
 
 		newtp->packets_out = 0;
 		newtp->retrans_out = 0;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 965ca86..0cfb91f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -223,6 +223,7 @@
 	.accept_ra_rtr_pref	= 1,
 	.rtr_probe_interval	= 60 * HZ,
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	.accept_ra_rt_info_min_plen = 0,
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
@@ -270,6 +271,7 @@
 	.accept_ra_rtr_pref	= 1,
 	.rtr_probe_interval	= 60 * HZ,
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	.accept_ra_rt_info_min_plen = 0,
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
@@ -4963,6 +4965,7 @@
 	array[DEVCONF_RTR_PROBE_INTERVAL] =
 		jiffies_to_msecs(cnf->rtr_probe_interval);
 #ifdef CONFIG_IPV6_ROUTE_INFO
+	array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
 #endif
@@ -5939,6 +5942,13 @@
 	},
 #ifdef CONFIG_IPV6_ROUTE_INFO
 	{
+		.procname	= "accept_ra_rt_info_min_plen",
+		.data		= &ipv6_devconf.accept_ra_rt_info_min_plen,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "accept_ra_rt_info_max_plen",
 		.data		= &ipv6_devconf.accept_ra_rt_info_max_plen,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d8e6714..01858ac 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1395,6 +1395,8 @@
 			if (ri->prefix_len == 0 &&
 			    !in6_dev->cnf.accept_ra_defrtr)
 				continue;
+			if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
+				continue;
 			if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
 				continue;
 			rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
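The new accept_ra_rt_info_min_plen knob complements the existing max_plen filter: route information options whose prefix length falls outside [min_plen, max_plen] are now ignored when processing router advertisements. A small helper that an init script might use to set it is sketched below; the per-device path under /proc/sys follows the usual ipv6 devconf layout, and the interface name and the /48 threshold are illustrative choices, not part of the patch.

	#include <stdio.h>

	int main(void)
	{
		/* Ignore RA route-info options shorter than /48 on wlan0. */
		const char *path =
			"/proc/sys/net/ipv6/conf/wlan0/accept_ra_rt_info_min_plen";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return 1;
		}
		fprintf(f, "48\n");
		return fclose(f) == 0 ? 0 : 1;
	}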
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f6fbd25..26d5718 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1037,6 +1037,7 @@
 	ipc6.hlimit = -1;
 	ipc6.tclass = -1;
 	ipc6.dontfrag = -1;
+	sockc.tsflags = sk->sk_tsflags;
 
 	/* destination address check */
 	if (sin6) {
@@ -1157,7 +1158,6 @@
 
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.flowi6_uid = sk->sk_uid;
-	sockc.tsflags = sk->sk_tsflags;
 
 	if (msg->msg_controllen) {
 		opt = &opt_space;
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 3bf0c59..0f5628a 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1814,8 +1814,11 @@
 }
 
 #ifdef DDEBUG
-/* This function is not in xt_qtaguid_print.c because of locks visibility */
-static void prdebug_full_state(int indent_level, const char *fmt, ...)
+/*
+ * This function is not in xt_qtaguid_print.c because of locks visibility.
+ * The lock of sock_tag_list must be acquired before calling this function
+ */
+static void prdebug_full_state_locked(int indent_level, const char *fmt, ...)
 {
 	va_list args;
 	char *fmt_buff;
@@ -1836,16 +1839,12 @@
 	kfree(buff);
 	va_end(args);
 
-	spin_lock_bh(&sock_tag_list_lock);
 	prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
-	spin_unlock_bh(&sock_tag_list_lock);
 
-	spin_lock_bh(&sock_tag_list_lock);
 	spin_lock_bh(&uid_tag_data_tree_lock);
 	prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
 	prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
 	spin_unlock_bh(&uid_tag_data_tree_lock);
-	spin_unlock_bh(&sock_tag_list_lock);
 
 	spin_lock_bh(&iface_stat_list_lock);
 	prdebug_iface_stat_list(indent_level, &iface_stat_list);
@@ -1854,7 +1853,7 @@
 	pr_debug("qtaguid: %s(): }\n", __func__);
 }
 #else
-static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+static void prdebug_full_state_locked(int indent_level, const char *fmt, ...) {}
 #endif
 
 struct proc_ctrl_print_info {
@@ -1977,8 +1976,11 @@
 			   (u64)atomic64_read(&qtu_events.match_no_sk),
 			   (u64)atomic64_read(&qtu_events.match_no_sk_file));
 
-		/* Count the following as part of the last item_index */
-		prdebug_full_state(0, "proc ctrl");
+		/* Count the following as part of the last item_index. No need
+		 * to lock the sock_tag_list here since it is already locked when
+		 * starting the seq_file operation
+		 */
+		prdebug_full_state_locked(0, "proc ctrl");
 	}
 
 	return 0;
@@ -2887,8 +2889,10 @@
 
 	sock_tag_tree_erase(&st_to_free_tree);
 
-	prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+	spin_lock_bh(&sock_tag_list_lock);
+	prdebug_full_state_locked(0, "%s(): pid=%u tgid=%u", __func__,
 			   current->pid, current->tgid);
+	spin_unlock_bh(&sock_tag_list_lock);
 	return 0;
 }
 
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index ae25ded..0792541 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -588,7 +588,7 @@
 			ipv4 = true;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
-			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
 					nla_get_in6_addr(a), is_mask);
 			ipv6 = true;
 			break;
@@ -649,6 +649,8 @@
 			tun_flags |= TUNNEL_VXLAN_OPT;
 			opts_type = type;
 			break;
+		case OVS_TUNNEL_KEY_ATTR_PAD:
+			break;
 		default:
 			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
 				  type);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index e2c37061..69502fa 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -486,7 +486,8 @@
 	struct ib_cq *sendcq, *recvcq;
 	int rc;
 
-	max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+			RPCRDMA_MAX_SEND_SGES);
 	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
 		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
 		return -ENOMEM;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6a0d485..c36757e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -146,6 +146,7 @@
 	if (s) {
 		struct unix_sock *u = unix_sk(s);
 
+		BUG_ON(!atomic_long_read(&u->inflight));
 		BUG_ON(list_empty(&u->link));
 
 		if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@
 	}
 	list_del(&cursor);
 
+	/* Now gc_candidates contains only garbage.  Restore original
+	 * inflight counters for these as well, and remove the skbuffs
+	 * which are creating the cycle(s).
+	 */
+	skb_queue_head_init(&hitlist);
+	list_for_each_entry(u, &gc_candidates, link)
+		scan_children(&u->sk, inc_inflight, &hitlist);
+
 	/* not_cycle_list contains those sockets which do not make up a
 	 * cycle.  Restore these to the inflight list.
 	 */
@@ -350,14 +359,6 @@
 		list_move_tail(&u->link, &gc_inflight_list);
 	}
 
-	/* Now gc_candidates contains only garbage.  Restore original
-	 * inflight counters for these as well, and remove the skbuffs
-	 * which are creating the cycle(s).
-	 */
-	skb_queue_head_init(&hitlist);
-	list_for_each_entry(u, &gc_candidates, link)
-	scan_children(&u->sk, inc_inflight, &hitlist);
-
 	spin_unlock(&unix_gc_lock);
 
 	/* Here we are. Hitlist is filled. Die. */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 034f70c..9ed6b0f 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -548,21 +548,17 @@
 {
 	int err;
 
-	rtnl_lock();
-
 	if (!cb->args[0]) {
 		err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
 				  nl80211_fam.attrbuf, nl80211_fam.maxattr,
 				  nl80211_policy);
 		if (err)
-			goto out_unlock;
+			return err;
 
 		*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
 						   nl80211_fam.attrbuf);
-		if (IS_ERR(*wdev)) {
-			err = PTR_ERR(*wdev);
-			goto out_unlock;
-		}
+		if (IS_ERR(*wdev))
+			return PTR_ERR(*wdev);
 		*rdev = wiphy_to_rdev((*wdev)->wiphy);
 		/* 0 is the first index - add 1 to parse only once */
 		cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -572,10 +568,8 @@
 		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
 		struct wireless_dev *tmp;
 
-		if (!wiphy) {
-			err = -ENODEV;
-			goto out_unlock;
-		}
+		if (!wiphy)
+			return -ENODEV;
 		*rdev = wiphy_to_rdev(wiphy);
 		*wdev = NULL;
 
@@ -586,21 +580,11 @@
 			}
 		}
 
-		if (!*wdev) {
-			err = -ENODEV;
-			goto out_unlock;
-		}
+		if (!*wdev)
+			return -ENODEV;
 	}
 
 	return 0;
- out_unlock:
-	rtnl_unlock();
-	return err;
-}
-
-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
-{
-	rtnl_unlock();
 }
 
 /* IE validation */
@@ -2584,17 +2568,17 @@
 	int filter_wiphy = -1;
 	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;
+	int ret;
 
 	rtnl_lock();
 	if (!cb->args[2]) {
 		struct nl80211_dump_wiphy_state state = {
 			.filter_wiphy = -1,
 		};
-		int ret;
 
 		ret = nl80211_dump_wiphy_parse(skb, cb, &state);
 		if (ret)
-			return ret;
+			goto out_unlock;
 
 		filter_wiphy = state.filter_wiphy;
 
@@ -2639,12 +2623,14 @@
 		wp_idx++;
 	}
  out:
-	rtnl_unlock();
-
 	cb->args[0] = wp_idx;
 	cb->args[1] = if_idx;
 
-	return skb->len;
+	ret = skb->len;
+ out_unlock:
+	rtnl_unlock();
+
+	return ret;
 }
 
 static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
@@ -4371,9 +4357,10 @@
 	int sta_idx = cb->args[2];
 	int err;
 
+	rtnl_lock();
 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (err)
-		return err;
+		goto out_err;
 
 	if (!wdev->netdev) {
 		err = -EINVAL;
@@ -4408,7 +4395,7 @@
 	cb->args[2] = sta_idx;
 	err = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 
 	return err;
 }
@@ -5179,9 +5166,10 @@
 	int path_idx = cb->args[2];
 	int err;
 
+	rtnl_lock();
 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (err)
-		return err;
+		goto out_err;
 
 	if (!rdev->ops->dump_mpath) {
 		err = -EOPNOTSUPP;
@@ -5214,7 +5202,7 @@
 	cb->args[2] = path_idx;
 	err = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 	return err;
 }
 
@@ -5374,9 +5362,10 @@
 	int path_idx = cb->args[2];
 	int err;
 
+	rtnl_lock();
 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (err)
-		return err;
+		goto out_err;
 
 	if (!rdev->ops->dump_mpp) {
 		err = -EOPNOTSUPP;
@@ -5409,7 +5398,7 @@
 	cb->args[2] = path_idx;
 	err = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 	return err;
 }
 
@@ -7559,9 +7548,12 @@
 	int start = cb->args[2], idx = 0;
 	int err;
 
+	rtnl_lock();
 	err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
-	if (err)
+	if (err) {
+		rtnl_unlock();
 		return err;
+	}
 
 	wdev_lock(wdev);
 	spin_lock_bh(&rdev->bss_lock);
@@ -7584,7 +7576,7 @@
 	wdev_unlock(wdev);
 
 	cb->args[2] = idx;
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 
 	return skb->len;
 }
@@ -7668,9 +7660,10 @@
 	int res;
 	bool radio_stats;
 
+	rtnl_lock();
 	res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
 	if (res)
-		return res;
+		goto out_err;
 
 	/* prepare_wdev_dump parsed the attributes */
 	radio_stats = nl80211_fam.attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7711,7 +7704,7 @@
 	cb->args[2] = survey_idx;
 	res = skb->len;
  out_err:
-	nl80211_finish_wdev_dump(rdev);
+	rtnl_unlock();
 	return res;
 }
 
@@ -11302,17 +11295,13 @@
 	void *data = NULL;
 	unsigned int data_len = 0;
 
-	rtnl_lock();
-
 	if (cb->args[0]) {
 		/* subtract the 1 again here */
 		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
 		struct wireless_dev *tmp;
 
-		if (!wiphy) {
-			err = -ENODEV;
-			goto out_unlock;
-		}
+		if (!wiphy)
+			return -ENODEV;
 		*rdev = wiphy_to_rdev(wiphy);
 		*wdev = NULL;
 
@@ -11333,13 +11322,11 @@
 			  nl80211_fam.attrbuf, nl80211_fam.maxattr,
 			  nl80211_policy);
 	if (err)
-		goto out_unlock;
+		return err;
 
 	if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] ||
-	    !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
-		err = -EINVAL;
-		goto out_unlock;
-	}
+	    !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
+		return -EINVAL;
 
 	*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
 					   nl80211_fam.attrbuf);
@@ -11348,10 +11335,8 @@
 
 	*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
 					   nl80211_fam.attrbuf);
-	if (IS_ERR(*rdev)) {
-		err = PTR_ERR(*rdev);
-		goto out_unlock;
-	}
+	if (IS_ERR(*rdev))
+		return PTR_ERR(*rdev);
 
 	vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]);
 	subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -11364,19 +11349,15 @@
 		if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
 			continue;
 
-		if (!vcmd->dumpit) {
-			err = -EOPNOTSUPP;
-			goto out_unlock;
-		}
+		if (!vcmd->dumpit)
+			return -EOPNOTSUPP;
 
 		vcmd_idx = i;
 		break;
 	}
 
-	if (vcmd_idx < 0) {
-		err = -EOPNOTSUPP;
-		goto out_unlock;
-	}
+	if (vcmd_idx < 0)
+		return -EOPNOTSUPP;
 
 	if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) {
 		data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -11393,9 +11374,6 @@
 
 	/* keep rtnl locked in successful case */
 	return 0;
- out_unlock:
-	rtnl_unlock();
-	return err;
 }
 
 static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -11410,9 +11388,10 @@
 	int err;
 	struct nlattr *vendor_data;
 
+	rtnl_lock();
 	err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
 	if (err)
-		return err;
+		goto out;
 
 	vcmd_idx = cb->args[2];
 	data = (void *)cb->args[3];
@@ -11421,18 +11400,26 @@
 
 	if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
 			   WIPHY_VENDOR_CMD_NEED_NETDEV)) {
-		if (!wdev)
-			return -EINVAL;
+		if (!wdev) {
+			err = -EINVAL;
+			goto out;
+		}
 		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
-		    !wdev->netdev)
-			return -EINVAL;
+		    !wdev->netdev) {
+			err = -EINVAL;
+			goto out;
+		}
 
 		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
 			if (wdev->netdev &&
-			    !netif_running(wdev->netdev))
-				return -ENETDOWN;
-			if (!wdev->netdev && !wdev->p2p_started)
-				return -ENETDOWN;
+			    !netif_running(wdev->netdev)) {
+				err = -ENETDOWN;
+				goto out;
+			}
+			if (!wdev->netdev && !wdev->p2p_started) {
+				err = -ENETDOWN;
+				goto out;
+			}
 		}
 	}
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5bf7e1bf..e0437a7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3062,6 +3062,11 @@
 {
 	int rv;
 
+	/* Initialize the per-net locks here */
+	spin_lock_init(&net->xfrm.xfrm_state_lock);
+	spin_lock_init(&net->xfrm.xfrm_policy_lock);
+	mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
 	rv = xfrm_statistics_init(net);
 	if (rv < 0)
 		goto out_statistics;
@@ -3078,11 +3083,6 @@
 	if (rv < 0)
 		goto out;
 
-	/* Initialize the per-net locks here */
-	spin_lock_init(&net->xfrm.xfrm_state_lock);
-	spin_lock_init(&net->xfrm.xfrm_policy_lock);
-	mutex_init(&net->xfrm.xfrm_cfg_mutex);
-
 	return 0;
 
 out:
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 671a1d0..a7e27e1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -412,7 +412,14 @@
 	up = nla_data(rp);
 	ulen = xfrm_replay_state_esn_len(up);
 
-	if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+	/* Check the overall length and the internal bitmap length to avoid
+	 * potential overflow. */
+	if (nla_len(rp) < ulen ||
+	    xfrm_replay_state_esn_len(replay_esn) != ulen ||
+	    replay_esn->bmp_len != up->bmp_len)
+		return -EINVAL;
+
+	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
 		return -EINVAL;
 
 	return 0;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 4c93520..f3b1d7f 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1832,6 +1832,7 @@
 	     info->output_pool != client->pool->size)) {
 		if (snd_seq_write_pool_allocated(client)) {
 			/* remove all existing cells */
+			snd_seq_pool_mark_closing(client->pool);
 			snd_seq_queue_client_leave_cells(client->number);
 			snd_seq_pool_done(client->pool);
 		}
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 86240d0..3f4efcb 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -70,6 +70,9 @@
 		return;
 	*fifo = NULL;
 
+	if (f->pool)
+		snd_seq_pool_mark_closing(f->pool);
+
 	snd_seq_fifo_clear(f);
 
 	/* wake up clients if any */
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index dfa5156..5847c44 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -414,6 +414,18 @@
 	return 0;
 }
 
+/* refuse the further insertion to the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+	unsigned long flags;
+
+	if (snd_BUG_ON(!pool))
+		return;
+	spin_lock_irqsave(&pool->lock, flags);
+	pool->closing = 1;
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 /* remove events */
 int snd_seq_pool_done(struct snd_seq_pool *pool)
 {
@@ -424,10 +436,6 @@
 		return -EINVAL;
 
 	/* wait for closing all threads */
-	spin_lock_irqsave(&pool->lock, flags);
-	pool->closing = 1;
-	spin_unlock_irqrestore(&pool->lock, flags);
-
 	if (waitqueue_active(&pool->output_sleep))
 		wake_up(&pool->output_sleep);
 
@@ -484,6 +492,7 @@
 	*ppool = NULL;
 	if (pool == NULL)
 		return 0;
+	snd_seq_pool_mark_closing(pool);
 	snd_seq_pool_done(pool);
 	kfree(pool);
 	return 0;
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 4a2ec77..32f959c 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -84,6 +84,7 @@
 int snd_seq_pool_init(struct snd_seq_pool *pool);
 
 /* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
 int snd_seq_pool_done(struct snd_seq_pool *pool);
 
 /* create pool */
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index ab4cdab..79edd88 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1905,7 +1905,7 @@
 		return err;
 
 	/* Set DMA transfer mask */
-	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+	if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
 		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
 	} else {
 		dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0c62b1d..112caa2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6058,6 +6058,8 @@
 		ALC295_STANDARD_PINS,
 		{0x17, 0x21014040},
 		{0x18, 0x21a19050}),
+	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC295_STANDARD_PINS),
 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC298_STANDARD_PINS,
 		{0x17, 0x90170110}),
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7d505e2..94ea909 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -78,8 +78,7 @@
 	[snd_soc_dapm_dai_link] = 2,
 	[snd_soc_dapm_dai_in] = 4,
 	[snd_soc_dapm_dai_out] = 4,
-	[snd_soc_dapm_aif_in] = 4,
-	[snd_soc_dapm_aif_out] = 4,
+	[snd_soc_dapm_adc] = 4,
 	[snd_soc_dapm_mic] = 5,
 	[snd_soc_dapm_mux] = 6,
 	[snd_soc_dapm_demux] = 6,
@@ -88,7 +87,8 @@
 	[snd_soc_dapm_mixer] = 8,
 	[snd_soc_dapm_mixer_named_ctl] = 8,
 	[snd_soc_dapm_pga] = 9,
-	[snd_soc_dapm_adc] = 10,
+	[snd_soc_dapm_aif_in] = 9,
+	[snd_soc_dapm_aif_out] = 9,
 	[snd_soc_dapm_out_drv] = 11,
 	[snd_soc_dapm_hp] = 11,
 	[snd_soc_dapm_spk] = 11,
@@ -100,7 +100,9 @@
 static int dapm_down_seq[] = {
 	[snd_soc_dapm_pre] = 0,
 	[snd_soc_dapm_kcontrol] = 1,
-	[snd_soc_dapm_adc] = 2,
+	[snd_soc_dapm_aif_in] = 2,
+	[snd_soc_dapm_aif_out] = 2,
+	[snd_soc_dapm_adc] = 5,
 	[snd_soc_dapm_hp] = 3,
 	[snd_soc_dapm_spk] = 3,
 	[snd_soc_dapm_line] = 3,
@@ -114,8 +116,6 @@
 	[snd_soc_dapm_micbias] = 8,
 	[snd_soc_dapm_mux] = 9,
 	[snd_soc_dapm_demux] = 9,
-	[snd_soc_dapm_aif_in] = 10,
-	[snd_soc_dapm_aif_out] = 10,
 	[snd_soc_dapm_dai_in] = 10,
 	[snd_soc_dapm_dai_out] = 10,
 	[snd_soc_dapm_dai_link] = 11,
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 4f4d230..53f12d2 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1931,14 +1931,14 @@
 
 	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
 
-	/* shutdown the BEs */
-	dpcm_be_dai_shutdown(fe, substream->stream);
-
 	dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);
 
 	/* now shutdown the frontend */
 	soc_pcm_close(substream);
 
+	/* shutdown the BEs */
+	dpcm_be_dai_shutdown(fe, substream->stream);
+
 	/* run the stream event for each BE */
 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
 
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index 4144666..d5677d3 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -13,12 +13,6 @@
 #define _TOOLS_LINUX_LOG2_H
 
 /*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
  * non-constant log of base 2 calculators
  * - the arch may override these in asm/bitops.h if they can be implemented
  *   more efficiently than using fls() and fls64()
@@ -78,7 +72,7 @@
 #define ilog2(n)				\
 (						\
 	__builtin_constant_p(n) ? (		\
-		(n) < 1 ? ____ilog2_NaN() :	\
+		(n) < 2 ? 0 :			\
 		(n) & (1ULL << 63) ? 63 :	\
 		(n) & (1ULL << 62) ? 62 :	\
 		(n) & (1ULL << 61) ? 61 :	\
@@ -141,10 +135,7 @@
 		(n) & (1ULL <<  4) ?  4 :	\
 		(n) & (1ULL <<  3) ?  3 :	\
 		(n) & (1ULL <<  2) ?  2 :	\
-		(n) & (1ULL <<  1) ?  1 :	\
-		(n) & (1ULL <<  0) ?  0 :	\
-		____ilog2_NaN()			\
-				   ) :		\
+		1 ) :				\
 	(sizeof(n) <= 4) ?			\
 	__ilog2_u32(n) :			\
 	__ilog2_u64(n)				\