Merge "msm: ADSPRPC: Fix for NULL pointer dereference"
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt b/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt
new file mode 100644
index 0000000..a2ed98e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt
@@ -0,0 +1,187 @@
+* Qualcomm Technologies, Inc. MSM Camera JPEG
+
+The MSM camera JPEG devices are implemented as multiple device nodes.
+The root JPEG device node has properties defined to hint the driver
+about the number of Encoder and DMA nodes available during the
+probe sequence. Each node has multiple properties defined
+for interrupts, clocks and regulators.
+
+=======================
+Required Node Structure
+=======================
+The JPEG root interface node accounts for the number of Encoder and DMA
+devices present on the hardware.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-jpeg".
+
+- compat-hw-name
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,jpegenc" or "qcom,jpegdma".
+
+- num-jpeg-enc
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported Encoder HW blocks.
+
+- num-jpeg-dma
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported DMA HW blocks.
+
+Example:
+	qcom,cam-jpeg {
+		compatible = "qcom,cam-jpeg";
+		compat-hw-name = "qcom,jpegenc",
+			"qcom,jpegdma";
+		num-jpeg-enc = <1>;
+		num-jpeg-dma = <1>;
+		status = "ok";
+	};
+
+
+=======================
+Required Node Structure
+=======================
+Encoder/DMA nodes describe the device register map, interrupt map, clocks
+and regulators to the JPEG driver.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam_jpeg_enc".
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+              camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with JPEG HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for JPEG HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for JPEG HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for JPEG HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
+Examples:
+	cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_enc";
+		reg-names = "jpege_hw";
+		reg = <0xac4e000 0x4000>;
+		reg-cam-base = <0x4e000>;
+		interrupt-names = "jpeg";
+		interrupts = <0 474 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegenc_clk_src",
+			"jpegenc_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegenc_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
+	cam_jpeg_dma: qcom,jpegdma@ac52000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_dma";
+		reg-names = "jpegdma_hw";
+		reg = <0xac52000 0x4000>;
+		reg-cam-base = <0x52000>;
+		interrupt-names = "jpegdma";
+		interrupts = <0 475 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegdma_clk_src",
+			"jpegdma_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegdma_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 441d771..6df71af 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -85,21 +85,6 @@
 		maximum charge current in mA for each thermal
 		level.
 
-- qcom,step-soc-thresholds
-  Usage:      optional
-  Value type: Array of <u32>
-  Definition: Array of SOC threshold values, size of 4. This should be a
-		flat array that denotes the percentage ranging from 0 to 100.
-		If the array is not present, step charging is disabled.
-
-- qcom,step-current-deltas
-  Usage:      optional
-  Value type: Array of <s32>
-  Definition: Array of delta values for charging current, size of 5, with
-		FCC as base.  This should be a flat array that denotes the
-		offset of charging current in uA, from -3100000 to 3200000.
-		If the array is not present, step charging is disabled.
-
 - io-channels
   Usage:      optional
   Value type: List of <phandle u32>
@@ -182,6 +167,18 @@
   Definition: Specifies the deglitch interval for OTG detection.
 		If the value is not present, 50 msec is used as default.
 
+- qcom,step-charging-enable
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present enables step-charging.
+
+- qcom,wd-bark-time-secs
+  Usage:      optional
+  Value type: <u32>
+  Definition: WD bark-timeout in seconds. The possible values are
+		16, 32, 64 and 128. If not defined, it defaults to 64. An
+		illustrative snippet follows below.
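+
+  An illustrative snippet, assuming both of the new optional properties are
+  added to the main charger node; the bark time below is simply one of the
+  allowed values, not a recommendation:
+
+	qcom,step-charging-enable;
+	qcom,wd-bark-time-secs = <128>;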
+
+
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
 =============================================
@@ -217,9 +214,6 @@
 
 	dpdm-supply = <&qusb_phy0>;
 
-	qcom,step-soc-thresholds = <60 70 80 90>;
-	qcom,step-current-deltas = <500000 250000 150000 0 (-150000)>;
-
 	qcom,chgr@1000 {
 		reg = <0x1000 0x100>;
 		interrupts =    <0x2 0x10 0x0 IRQ_TYPE_NONE>,
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 7b491f3..4d05e50 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2579,3 +2579,256 @@
 		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
 					  "SpkrLeft", "SpkrRight";
 	};
+
+* SDM670 ASoC Machine driver
+
+Required properties:
+- compatible : "qcom,sdm670-asoc-snd"
+- qcom,model : The user-visible name of this sound card.
+- qcom,msm-hs-micbias-type : This property is used to recognize the headset
+  micbias type, internal or external.
+- qcom,msm-mbhc-hphl-swh: This property is used to distinguish the headset HPHL
+switch type on the target. Typically the switch type will be normally open or
+normally closed; the value is 0 for normally closed and 1 for normally open.
+- qcom,msm-mbhc-gnd-swh: This property is used to distinguish the headset GND
+switch type on the target. Typically the switch type will be normally open or
+normally closed; the value is 0 for normally closed and 1 for normally open.
+- qcom,audio-routing : A list of the connections between audio components.
+- qcom,msm-gpios : Lists all the gpio sets that are supported.
+- qcom,pinctrl-names : Lists all the possible combinations of the gpio sets
+mentioned in qcom,msm-gpios.
+- pinctrl-names : The combinations of gpio sets from above that are supported in
+the flavor.
+- pinctrl-# : Pinctrl states as mentioned in pinctrl-names.
+
+Optional properties:
+- qcom,cdc-us-euro-gpios : GPIO on which the gnd/mic swap signal arrives.
+- qcom,msm-micbias1-ext-cap : Boolean. Enable micbias1 external
+capacitor mode.
+- qcom,msm-micbias2-ext-cap : Boolean. Enable micbias2 external
+capacitor mode.
+- qcom,wsa-disable : Boolean. Disables the WSA speaker dai-links from the sound node.
+- qcom,msm-spk-ext-pa : GPIO which enables the external speaker PA.
+- qcom,msm-mclk-freq : This property informs the machine driver of the MCLK
+frequency that needs to be configured for the internal and external PA.
+- asoc-platform: This is a phandle list containing the references to platform device
+		 nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains the list of platform names. The order
+		       of the platform names should match the phandle order given
+		       in "asoc-platform".
+- asoc-cpu: This is a phandle list containing the references to cpu dai device nodes
+	    that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains the list of cpu dai names. The order of the
+		  cpu dai names should match the phandle order given in "asoc-cpu".
+- asoc-codec: This is a phandle list containing the references to codec dai device
+	nodes that are used as part of the sound card dai-links.
+- asoc-codec-names: This property contains the list of codec dai names. The order
+	of the codec dai names should match the phandle order given
+	in "asoc-codec".
+- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target.
+- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target.
+- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for the WSA881x device.
+- qcom,cdc-pdm-gpios : phandle for pdm gpios.
+- qcom,cdc-comp-gpios : phandle for compander gpios.
+- qcom,cdc-dmic-gpios : phandle for Digital mic clk and data gpios.
+- qcom,cdc-sdw-gpios : phandle for soundwire clk and data gpios.
+- qcom,msm-mbhc-moist-cfg: This property is used to set the moisture detection
+		threshold values for different codecs. The first parameter is V (voltage),
+		the second is I (current) and the third is R (resistance). Depending on
+		the codec, set the corresponding element in the array and set the others
+		to 0; a sketch follows below.
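+
+  For illustration only, a minimal sketch of how this property might be set;
+  the three cells below stand for V, I and R respectively and are placeholder
+  values, not tuning recommendations:
+
+		qcom,msm-mbhc-moist-cfg = <0>, <0>, <3>;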
+
+Example:
+	 sound {
+		compatible = "qcom,sdm670-asoc-snd";
+		qcom,model = "sdm670-snd-card";
+		qcom,msm-mclk-freq = <9600000>;
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,msm-hs-micbias-type = "internal";
+		qcom,msm-micbias1-ext-cap;
+		qcom,audio-routing =
+			"RX_BIAS", "MCLK",
+			"SPK_RX_BIAS", "MCLK",
+			"INT_LDO_H", "MCLK",
+			"MIC BIAS External", "Handset Mic",
+			"MIC BIAS Internal2", "Headset Mic",
+			"MIC BIAS External", "Secondary Mic",
+			"AMIC1", "MIC BIAS External",
+			"AMIC2", "MIC BIAS Internal2",
+			"AMIC3", "MIC BIAS External";
+		qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+		qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
+		qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
+		qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+		qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
+		asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&lpa>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-voip-dsp", "msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless", "msm-pcm-afe",
+				"msm-lsm-client", "msm-pcm-routing", "msm-pcm-lpa";
+		asoc-cpu = <&dai_pri_auxpcm>, <&dai_hdmi>,
+				<&dai_mi2s0>, <&dai_mi2s1>, <&dai_mi2s2>, <&dai_mi2s3>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_3_rx>, <&sb_3_tx>, <&sb_4_rx>, <&sb_4_tx>,
+				<&bt_sco_rx>, <&bt_sco_tx>, <&int_fm_rx>, <&int_fm_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>, <&afe_proxy_tx>,
+				<&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>;
+		asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-hdmi.8",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.12288", "msm-dai-q6-dev.12289",
+				"msm-dai-q6-dev.12292", "msm-dai-q6-dev.12293",
+				"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+				"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+				"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+				"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770";
+		asoc-codec = <&stub_codec>;
+		asoc-codec-names = "msm-stub-codec.1";
+		qcom,wsa-max-devs = <2>;
+		qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+				<&wsa881x_213>, <&wsa881x_214>;
+		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
+					  "SpkrRight", "SpkrLeft";
+	};
+
+* SDM670 ASoC Slimbus Machine driver
+
+Required properties:
+- compatible : "qcom,sdm670-asoc-snd-tasha" for tasha codec,
+		"qcom,sdm670-asoc-snd-tavil" for tavil codec.
+- qcom,model : The user-visible name of this sound card.
+- qcom,msm-mclk-freq : MCLK frequency value for the external codec.
+- qcom,msm-gpios : Lists all the gpio sets that are supported.
+- qcom,pinctrl-names : Lists all the possible combinations of the gpio sets
+mentioned in qcom,msm-gpios. For N GPIOs there are 2^N combinations, and this
+lists all of them.
+- pinctrl-names : The combinations of gpio sets from above that are supported
+in the flavor. This can be the same as qcom,pinctrl-names, i.e. all 2^N
+combinations, or fewer if some combinations are not supported.
+- pinctrl-# : Pinctrl states as mentioned in pinctrl-names.
+- qcom,audio-routing : A list of the connections between audio components.
+- asoc-platform: This is a phandle list containing the references to platform device
+		 nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains the list of platform names. The order
+		       of the platform names should match the phandle order given
+		       in "asoc-platform".
+- asoc-cpu: This is a phandle list containing the references to cpu dai device nodes
+	    that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains the list of cpu dai names. The order of the
+		  cpu dai names should match the phandle order given
+		  in "asoc-cpu". The cpu names are in the "%s.%d" form,
+		  where the id (%d) field represents the back-end AFE port id that
+		  this CPU dai is associated with.
+- asoc-codec: This is a phandle list containing the references to codec dai device
+	      nodes that are used as part of the sound card dai-links.
+- asoc-codec-names: This property contains the list of codec dai names. The order
+		    of the codec dai names should match the phandle order given
+		    in "asoc-codec".
+Optional properties:
+- qcom,cdc-us-euro-gpios : GPIO on which the gnd/mic swap signal arrives.
+- clock-names : Clock name defined for the external codec clock (see the sketch
+below).
+- clocks : External clock used as the codec clock.
+- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target.
+- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target.
+- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for the WSA881x device.
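+
+  A purely illustrative sketch of the external codec clock hookup; the clock
+  name "ext_clk" and the provider label "codec_ext_clk" are placeholders, not
+  actual SDM670 identifiers:
+
+		clock-names = "ext_clk";
+		clocks = <&codec_ext_clk>;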
+
+Example:
+
+	sound-9335 {
+	compatible = "qcom,sdm670-asoc-snd-tasha";
+	qcom,model = "sdm670-tasha-snd-card";
+
+	qcom,audio-routing =
+		"RX_BIAS", "MCLK",
+		"LDO_H", "MCLK",
+		"AIF4 MAD", "MCLK",
+		"ultrasound amp", "LINEOUT1",
+		"ultrasound amp", "LINEOUT3",
+		"AMIC1", "MIC BIAS1 Internal1",
+		"MIC BIAS1 Internal1", "Handset Mic",
+		"AMIC2", "MIC BIAS2 External",
+		"MIC BIAS2 External", "Headset Mic",
+		"AMIC3", "MIC BIAS2 External",
+		"MIC BIAS2 External", "ANCRight Headset Mic",
+		"AMIC4", "MIC BIAS2 External",
+		"MIC BIAS2 External", "ANCLeft Headset Mic",
+		"DMIC1", "MIC BIAS1 External",
+		"MIC BIAS1 External", "Digital Mic1",
+		"DMIC2", "MIC BIAS1 External",
+		"MIC BIAS1 External", "Digital Mic2",
+		"DMIC3", "MIC BIAS3 External",
+		"MIC BIAS3 External", "Digital Mic3",
+		"DMIC4", "MIC BIAS3 External",
+		"MIC BIAS3 External", "Digital Mic4",
+		"DMIC5", "MIC BIAS4 External",
+		"MIC BIAS4 External", "Digital Mic5",
+		"DMIC6", "MIC BIAS4 External",
+		"MIC BIAS4 External", "Digital Mic6";
+
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,msm-mclk-freq = <9600000>;
+		qcom,msm-gpios =
+			"slim",
+			"us_eu_gpio";
+		qcom,pinctrl-names =
+			"all_off",
+			"slim_act",
+			"us_eu_gpio_act",
+			"slim_us_eu_gpio_act";
+		pinctrl-names =
+			"all_off",
+			"slim_act",
+			"us_eu_gpio_act",
+			"slim_us_eu_gpio_act";
+		pinctrl-0 = <&cdc_slim_lines_sus &cross_conn_det_sus>;
+		pinctrl-1 = <&cdc_slim_lines_act &cross_conn_det_sus>;
+		pinctrl-2 = <&cdc_slim_lines_sus &cross_conn_det_act>;
+		pinctrl-3 = <&cdc_slim_lines_act &cross_conn_det_act>;
+		qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-cpe-lsm",
+				"msm-compr-dsp";
+		asoc-cpu = <&dai_hdmi>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+				<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+				<&afe_proxy_tx>, <&incall_record_rx>,
+				<&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>, <&sb_5_rx>;
+		asoc-cpu-names = "msm-dai-q6-hdmi.8",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+				"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+				"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+				"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+				"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394";
+		asoc-codec = <&stub_codec>;
+		asoc-codec-names = "msm-stub-codec.1";
+		qcom,wsa-max-devs = <2>;
+		qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+				<&wsa881x_213>, <&wsa881x_214>;
+		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
+					  "SpkrRight", "SpkrLeft";
+	};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
index 436a05d..bffcdf5 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -31,6 +31,8 @@
 		qcom,mdss-dsi-h-right-border = <0>;
 		qcom,mdss-dsi-v-top-border = <0>;
 		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
 		qcom,mdss-dsi-bpp = <24>;
 		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
 		qcom,mdss-dsi-underflow-color = <0xff>;
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
index 64e4d27..f860ea3 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -46,6 +46,8 @@
 		qcom,mdss-dsi-lane-3-state;
 		qcom,adjust-timer-wakeup-ms = <1>;
 		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
 		qcom,mdss-dsi-bl-max-level = <4095>;
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
index 346a8b4..23a96a4 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -202,6 +202,8 @@
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
 		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
 		qcom,mdss-dsi-tx-eot-append;
 
 		qcom,config-select = <&dsi_dual_nt35597_truly_video_config0>;
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
index c2b054a..6f66e8e 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
@@ -37,6 +37,8 @@
 		qcom,mdss-dsi-underflow-color = <0xff>;
 		qcom,mdss-dsi-border-color = <0>;
 		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <64>;
+		qcom,mdss-pan-physical-height-dimension = <117>;
 		qcom,mdss-dsi-on-command = [
 			15 01 00 00 00 00 02 bb 10
 			15 01 00 00 00 00 02 b0 03
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 744bd2c..4562f8c 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -41,6 +41,8 @@
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
 		qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
+		qcom,mdss-pan-physical-width-dimension = <71>;
+		qcom,mdss-pan-physical-height-dimension = <129>;
 		qcom,mdss-dsi-te-pin-select = <1>;
 		qcom,mdss-dsi-wr-mem-start = <0x2c>;
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index b314e99..7954856 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -41,6 +41,8 @@
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
 		qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
+		qcom,mdss-pan-physical-width-dimension = <71>;
+		qcom,mdss-pan-physical-height-dimension = <129>;
 		qcom,mdss-dsi-tx-eot-append;
 
 		qcom,adjust-timer-wakeup-ms = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index dcc5d1b..46d4aa6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1146,5 +1146,103 @@
 				};
 			};
 		};
+		/* SDC pin type */
+		sdc1_clk_on: sdc1_clk_on {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc1_clk_off: sdc1_clk_off {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_cmd_on: sdc1_cmd_on {
+			config {
+				pins = "sdc1_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_cmd_off: sdc1_cmd_off {
+			config {
+				pins = "sdc1_cmd";
+				num-grp-pins = <1>;
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_data_on: sdc1_data_on {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_data_off: sdc1_data_off {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_clk_on: sdc2_clk_on {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_clk_off: sdc2_clk_off {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_cmd_on: sdc2_cmd_on {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc2_cmd_off: sdc2_cmd_off {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_data_on: sdc2_data_on {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc2_data_off: sdc2_data_off {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
index 17b90c7..f2f41fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
@@ -77,3 +77,24 @@
 	spm-level = <0>;
 	status = "ok";
 };
+
+&sdhc_1 {
+	vdd-supply = <&pm660l_l4>;
+	qcom,vdd-voltage-level = <2960000 2960000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	vdd-io-supply = <&pm660_l8>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on  &sdc1_cmd_on &sdc1_data_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000>;
+	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 7d9702e..3bef777 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -30,6 +30,7 @@
 
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
+		sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
 	};
 
 	aliases {
@@ -1092,7 +1093,7 @@
 		qcom,mpu-enabled;
 	};
 
-	qmp_aop: mailbox@1799000c {
+	qmp_aop: qcom,qmp-aop@c300000 {
 		compatible = "qcom,qmp-mbox";
 		label = "aop";
 		reg = <0xc300000 0x100000>,
@@ -1100,6 +1101,7 @@
 		reg-names = "msgram", "irq-reg-base";
 		qcom,irq-mask = <0x1>;
 		interrupts = <0 389 1>;
+		priority = <0>;
 		mbox-desc-offset = <0x0>;
 		#mbox-cells = <1>;
 	};
@@ -1792,6 +1794,27 @@
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
 		status = "ok";
 	};
+
+	sdhc_1: sdhci@7c4000 {
+		compatible = "qcom,sdhci-msm-v5";
+		reg = <0x7C4000 0x1000>, <0x7C5000 0x1000>;
+		reg-names = "hc_mem", "cmdq_mem";
+
+		interrupts = <0 641 0>, <0 644 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		qcom,bus-width = <8>;
+		qcom,large-address-bus;
+
+		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
+			<&clock_gcc GCC_SDCC1_APPS_CLK>;
+		clock-names = "iface_clk", "core_clk";
+
+		qcom,nonremovable;
+
+		qcom,scaling-lower-bus-speed-mode = "DDR52";
+		status = "disabled";
+	};
 };
 
 #include "sdm670-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 3f19890..3fa0ab3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -246,6 +246,23 @@
 			};
 		};
 
+		msm_cam_smmu_jpeg {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1060 0x8>,
+				<&apps_smmu 0x1068 0x8>;
+			label = "jpeg";
+			jpeg_iova_mem_map: iova-mem-map {
+				/* IO region is approximately 3.4 GB */
+				iova-mem-region-io {
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
 		msm_cam_icp_fw {
 			compatible = "qcom,msm-cam-smmu-fw-dev";
 			label="icp";
@@ -398,7 +415,7 @@
 			"csid0", "csid1", "csid2",
 			"ife0", "ife1", "ife2", "ipe0",
 			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
-			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
 		client-axi-port-names =
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
 			"cam_hf_1", "cam_hf_2", "cam_hf_2",
@@ -491,8 +508,8 @@
 		label = "cam-cdm-intf";
 		num-hw-cdm = <1>;
 		cdm-client-names = "vfe",
-			"jpeg-dma",
-			"jpeg",
+			"jpegdma",
+			"jpegenc",
 			"fd";
 		status = "ok";
 	};
@@ -875,4 +892,76 @@
 		clock-cntl-level = "turbo";
 		status = "ok";
 	};
+
+	qcom,cam-jpeg {
+		compatible = "qcom,cam-jpeg";
+		compat-hw-name = "qcom,jpegenc",
+			"qcom,jpegdma";
+		num-jpeg-enc = <1>;
+		num-jpeg-dma = <1>;
+		status = "ok";
+	};
+
+	cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_enc";
+		reg-names = "jpege_hw";
+		reg = <0xac4e000 0x4000>;
+		reg-cam-base = <0x4e000>;
+		interrupt-names = "jpeg";
+		interrupts = <0 474 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegenc_clk_src",
+			"jpegenc_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegenc_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
+	cam_jpeg_dma: qcom,jpegdma@ac52000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_dma";
+		reg-names = "jpegdma_hw";
+		reg = <0xac52000 0x4000>;
+		reg-cam-base = <0x52000>;
+		interrupt-names = "jpegdma";
+		interrupts = <0 475 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegdma_clk_src",
+			"jpegdma_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegdma_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index b826768..03b9e06 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -453,7 +453,7 @@
 	};
 
 	apc1_cpr: cprh-ctrl@17db0000 {
-		compatible = "qcom,cprh-sdm845-kbss-regulator";
+		compatible = "qcom,cprh-sdm845-v1-kbss-regulator";
 		reg =	<0x17db0000 0x4000>,
 			<0x00784000 0x1000>,
 			<0x17830000 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index c3fee80..b8aeb87 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -26,6 +26,418 @@
 	/delete-property/ qcom,sdr104-wa;
 };
 
+/delete-node/ &apc0_cpr;
+/delete-node/ &apc1_cpr;
+
+&soc {
+	/* CPR controller regulators */
+	apc0_cpr: cprh-ctrl@17dc0000 {
+		compatible = "qcom,cprh-sdm845-v2-kbss-regulator";
+		reg =	<0x17dc0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17840000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc0";
+		qcom,cpr-controller-id = <0>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <11>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <20>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+		qcom,cpr-reset-step-quot-loop-en;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x3B803B8>;
+
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17dc3a84 0x17dc3a88 0x17840c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_SILVER_CPRH_STATUS_0",
+			"APSS_SILVER_CPRH_STATUS_1",
+			"SILVER_SAW4_PMIC_STS";
+
+		qcom,cpr-aging-ref-voltage = <952000>;
+		vdd-supply = <&pm8998_s13>;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <0>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_pwrcl_vreg: regulator {
+				regulator-name = "apc0_pwrcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <18>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <18 18>;
+				qcom,cpr-corners = <18>;
+
+				qcom,cpr-corner-fmax-map = <6 12 15 18>;
+
+				qcom,cpr-voltage-ceiling =
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 884000  952000  952000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  40000  40000>;
+
+				qcom,corner-frequencies =
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1228800000
+					1324800000 1420800000 1516800000
+					1612800000 1689600000 1766400000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<2594 2795 2576 2761 2469 2673 2198
+					 2553 3188 3255 3191 2962 3055 2984
+					 2043 2947>,
+					<2594 2795 2576 2761 2469 2673 2198
+					 2553 3188 3255 3191 2962 3055 2984
+					 2043 2947>,
+					<2259 2389 2387 2531 2294 2464 2218
+					 2476 2525 2855 2817 2836 2740 2490
+					 1950 2632>,
+					<2259 2389 2387 2531 2294 2464 2218
+					 2476 2525 2855 2817 2836 2740 2490
+					 1950 2632>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <18>;
+				qcom,cpr-aging-ro-scaling-factor = <1620>;
+				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+
+		thread@1 {
+			qcom,cpr-thread-id = <1>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <0>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_l3_vreg: regulator {
+				regulator-name = "apc0_l3_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <14>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <14 14>;
+				qcom,cpr-corners = <14>;
+
+				qcom,cpr-corner-fmax-map = <4 8 11 14>;
+
+				qcom,cpr-voltage-ceiling =
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  884000  884000  952000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  40000>;
+
+				qcom,corner-frequencies =
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 844800000  940800000 1036800000
+					1132800000 1209600000 1305600000
+					1401600000 1478400000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2439 2577 2552 2667 2461 2577 2394
+					 2536 2132 2307 2191 2903 2838 2912
+					 2501 2095>,
+					<2439 2577 2552 2667 2461 2577 2394
+					 2536 2132 2307 2191 2903 2838 2912
+					 2501 2095>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <14>;
+				qcom,cpr-aging-ro-scaling-factor = <1620>;
+				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+
+	apc1_cpr: cprh-ctrl@17db0000 {
+		compatible = "qcom,cprh-sdm845-v2-kbss-regulator";
+		reg =	<0x17db0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17830000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc1";
+		qcom,cpr-controller-id = <1>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <9>;
+		qcom,cpr-step-quot-init-max = <14>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <20>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+		qcom,cpr-reset-step-quot-loop-en;
+
+		qcom,apm-threshold-voltage = <800000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,mem-acc-threshold-voltage = <852000>;
+		qcom,mem-acc-crossover-voltage = <852000>;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x4700470>;
+
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17db3a84 0x17830c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
+
+		qcom,cpr-aging-ref-voltage = <1136000>;
+		vdd-supply = <&pm8998_s12>;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <0>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc1_perfcl_vreg: regulator {
+				regulator-name = "apc1_perfcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <33>;
+
+				qcom,cpr-fuse-corners = <5>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <28 31>;
+				qcom,cpr-corners =
+					/* Speed bin 0 */
+					<28 28 28 28 28 28 28 28>,
+					/* Speed bin 1 */
+					<31 31 31 31 31 31 31 31>;
+
+				qcom,cpr-corner-fmax-map =
+					/* Speed bin 0 */
+					<7 14 22 27 28>,
+					/* Speed bin 1 */
+					<7 14 22 27 31>;
+
+				qcom,cpr-voltage-ceiling =
+					/* Speed bin 0 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  884000  884000
+					 884000  884000 1104000 1104000 1104000
+					1104000 1136000 1136000>,
+					/* Speed bin 1 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  884000  884000
+					 884000  884000 1104000 1104000 1104000
+					1104000 1136000 1136000 1136000 1136000
+					1136000>;
+
+				qcom,cpr-voltage-floor =
+					/* Speed bin 0 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000>,
+					/* Speed bin 1 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					/* Speed bin 0 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000>,
+					/* Speed bin 1 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000>;
+
+				qcom,corner-frequencies =
+					/* Speed bin 0 */
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1459200000
+					1536000000 1612800000 1689600000
+					1766400000 1843200000 1920000000
+					1996800000 2092800000 2169600000
+					2246400000 2323200000 2400000000
+					2400000000>,
+					/* Speed bin 1 */
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1459200000
+					1536000000 1612800000 1689600000
+					1766400000 1843200000 1920000000
+					1996800000 2092800000 2169600000
+					2246400000 2323200000 2400000000
+					2476800000 2553600000 2630400000
+					2707200000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2086 2208 2273 2408 2203 2327 2213
+					 2340 1755 2039 2049 2474 2437 2618
+					 2003 1675>,
+					<2086 2208 2273 2408 2203 2327 2213
+					 2340 1755 2039 2049 2474 2437 2618
+					 2003 1675>,
+					<2086 2208 2273 2408 2203 2327 2213
+					 2340 1755 2039 2049 2474 2437 2618
+					 2003 1675>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000 100000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <27 31>;
+				qcom,cpr-aging-ro-scaling-factor = <1700>;
+				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+};
+
+&clock_cpucc {
+	vdd-l3-supply = <&apc0_l3_vreg>;
+	vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+};
+
 &clock_gcc {
 	compatible = "qcom,gcc-sdm845-v2";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 202df95..f408719 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -960,7 +960,6 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
 		governor = "performance";
-		qcom,prepare-clk;
 	};
 
 	l3_cpu4: qcom,l3-cpu4 {
@@ -968,7 +967,6 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
 		governor = "performance";
-		qcom,prepare-clk;
 	};
 
 	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 4e0dfd1..031ba29 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1399,6 +1399,14 @@
 
 	if (fl->profile)
 		getnstimeofday(&invoket);
+
+	VERIFY(err, fl->sctx != NULL);
+	if (err)
+		goto bail;
+	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+	if (err)
+		goto bail;
+
 	if (!kernel) {
 		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
 								&ctx));
@@ -2172,6 +2180,9 @@
 		kref_init(&me->channel[cid].kref);
 		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
 						MAJOR(me->dev_no), cid);
+		err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
+		if (err)
+			pr_info("adsprpc: initial intent failed for %d\n", cid);
 		if (me->channel[cid].ssrcount !=
 				 me->channel[cid].prevssrcount) {
 			me->channel[cid].prevssrcount =
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index e3fe064..0441451 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -100,7 +100,7 @@
 #define VERIFY(err, val) \
 do {\
 	VERIFY_IPRINTF(__FILE_LINE__"info: calling: " #val "\n");\
-	if (0 == (val)) {\
+	if ((val) == 0) {\
 		(err) = (err) == 0 ? -1 : (err);\
 		VERIFY_EPRINTF(__FILE_LINE__"error: %d: " #val "\n", (err));\
 	} else {\
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 47e9fab..1e49722 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -45,6 +45,7 @@
 #include "clk-debug.h"
 
 #define OSM_INIT_RATE			300000000UL
+#define XO_RATE				19200000UL
 #define OSM_TABLE_SIZE			40
 #define SINGLE_CORE			1
 #define MAX_CLUSTER_CNT			3
@@ -450,6 +451,7 @@
 }
 
 static bool is_v2;
+static bool osm_tz_enabled;
 
 static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
 {
@@ -544,23 +546,12 @@
 
 static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
 {
-	int quad_core_index, single_core_index = 0;
-	int core_count;
+	int index = 0;
 
-	for (quad_core_index = 0; quad_core_index < entries;
-						quad_core_index++) {
-		core_count = CORE_COUNT_VAL(table[quad_core_index].freq_data);
-		if (rate == table[quad_core_index].frequency &&
-					core_count == SINGLE_CORE) {
-			single_core_index = quad_core_index;
-			continue;
-		}
-		if (rate == table[quad_core_index].frequency &&
-					core_count == MAX_CORE_COUNT)
-			return quad_core_index;
+	for (index = 0; index < entries; index++) {
+		if (rate == table[index].frequency)
+			return index;
 	}
-	if (single_core_index)
-		return single_core_index;
 
 	return -EINVAL;
 }
@@ -642,7 +633,7 @@
 }
 
 
-const struct clk_ops clk_ops_l3_osm = {
+static struct clk_ops clk_ops_l3_osm = {
 	.enable = clk_osm_enable,
 	.round_rate = clk_osm_round_rate,
 	.list_rate = clk_osm_list_rate,
@@ -2107,6 +2098,49 @@
 	return rc;
 }
 
+static int clk_osm_read_lut(struct platform_device *pdev, struct clk_osm *c)
+{
+	u32 data, src, lval, i, j = OSM_TABLE_SIZE;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
+		src = ((data & GENMASK(31, 30)) >> 30);
+		lval = (data & GENMASK(7, 0));
+
+		if (!src)
+			c->osm_table[i].frequency = OSM_INIT_RATE;
+		else
+			c->osm_table[i].frequency = XO_RATE * lval;
+
+		data = clk_osm_read_reg(c, VOLT_REG + i * OSM_REG_SIZE);
+		c->osm_table[i].virtual_corner =
+					((data & GENMASK(21, 16)) >> 16);
+		c->osm_table[i].open_loop_volt = (data & GENMASK(11, 0));
+
+		pr_debug("index=%d freq=%ld virtual_corner=%d open_loop_voltage=%u\n",
+			 i, c->osm_table[i].frequency,
+			 c->osm_table[i].virtual_corner,
+			 c->osm_table[i].open_loop_volt);
+
+		if (i > 0 && j == OSM_TABLE_SIZE && c->osm_table[i].frequency ==
+					c->osm_table[i - 1].frequency)
+			j = i;
+	}
+
+	osm_clks_init[c->cluster_num].rate_max = devm_kcalloc(&pdev->dev,
+						 j, sizeof(unsigned long),
+						       GFP_KERNEL);
+	if (!osm_clks_init[c->cluster_num].rate_max)
+		return -ENOMEM;
+
+	for (i = 0; i < j; i++)
+		osm_clks_init[c->cluster_num].rate_max[i] =
+					c->osm_table[i].frequency;
+
+	c->num_entries = osm_clks_init[c->cluster_num].num_rate_max = j;
+	return 0;
+}
+
 static int clk_osm_parse_acd_dt_configs(struct platform_device *pdev)
 {
 	struct device_node *of = pdev->dev.of_node;
@@ -2582,6 +2616,12 @@
 		return -ENOMEM;
 	}
 
+	/* Check if OSM has been enabled already by trustzone.  */
+	if (readl_relaxed(l3_clk.vbases[OSM_BASE] + ENABLE_REG)) {
+		dev_info(&pdev->dev, "OSM has been initialized and enabled by TZ software\n");
+		osm_tz_enabled = true;
+	}
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"osm_pwrcl_base");
 	if (!res) {
@@ -2615,6 +2655,9 @@
 		return -ENOMEM;
 	}
 
+	if (osm_tz_enabled)
+		return rc;
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l3_pll");
 	if (!res) {
 		dev_err(&pdev->dev,
@@ -3029,241 +3072,282 @@
 		return rc;
 	}
 
-	if (l3_clk.vbases[EFUSE_BASE]) {
-		/* Multiple speed-bins are supported */
-		pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
-		l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
-						    L3_EFUSE_MASK);
-		snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
+	if (!osm_tz_enabled) {
+		if (l3_clk.vbases[EFUSE_BASE]) {
+			/* Multiple speed-bins are supported */
+			pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
+			l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
+							L3_EFUSE_MASK);
+			snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
 			 "qcom,l3-speedbin%d-v%d", l3_clk.speedbin, pvs_ver);
-	}
+		}
 
-	dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
-		 l3_clk.speedbin, pvs_ver);
+		dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
+					l3_clk.speedbin, pvs_ver);
 
-	rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
-			rc);
-		return rc;
-	}
+		rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
+				rc);
+			return rc;
+		}
 
-	if (pwrcl_clk.vbases[EFUSE_BASE]) {
-		/* Multiple speed-bins are supported */
-		pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
-		pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
-						    PWRCL_EFUSE_MASK);
-		snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+		if (pwrcl_clk.vbases[EFUSE_BASE]) {
+			/* Multiple speed-bins are supported */
+			pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+			pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+							PWRCL_EFUSE_MASK);
+			snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
 			 "qcom,pwrcl-speedbin%d-v%d", pwrcl_clk.speedbin,
 							pvs_ver);
-	}
+		}
 
-	dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
-		 pwrcl_clk.speedbin, pvs_ver);
+		dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+					pwrcl_clk.speedbin, pvs_ver);
 
-	rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	if (perfcl_clk.vbases[EFUSE_BASE]) {
-		/* Multiple speed-bins are supported */
-		pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
-		perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
-							PERFCL_EFUSE_MASK);
-		snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
-			 "qcom,perfcl-speedbin%d-v%d", perfcl_clk.speedbin,
-							pvs_ver);
-	}
-
-	dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
-		 perfcl_clk.speedbin, pvs_ver);
-
-	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	rc = clk_osm_parse_dt_configs(pdev);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
-		return rc;
-	}
-
-	rc = clk_osm_parse_acd_dt_configs(pdev);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
-		return rc;
-	}
-
-	rc = clk_osm_acd_resources_init(pdev);
-	if (rc) {
-		dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
-	if (rc) {
-		if (rc == -EPROBE_DEFER)
+		rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+				rc);
 			return rc;
-		dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
-			rc);
-		return rc;
-	}
+		}
 
-	rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
-	if (rc) {
-		if (rc == -EPROBE_DEFER)
+		if (perfcl_clk.vbases[EFUSE_BASE]) {
+			/* Multiple speed-bins are supported */
+			pte_efuse =
+				readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+			perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT)
+						& PERFCL_EFUSE_MASK);
+			snprintf(perfclspeedbinstr,
+				ARRAY_SIZE(perfclspeedbinstr),
+				"qcom,perfcl-speedbin%d-v%d",
+				perfcl_clk.speedbin, pvs_ver);
+		}
+
+		dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+					perfcl_clk.speedbin, pvs_ver);
+
+		rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+				rc);
 			return rc;
-		dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
-			rc);
-		return rc;
-	}
+		}
 
-	rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
-	if (rc) {
-		if (rc == -EPROBE_DEFER)
+		rc = clk_osm_parse_dt_configs(pdev);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
 			return rc;
-		dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
-			rc);
-		return rc;
-	}
+		}
 
-	rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
-	if (rc)
-		dev_info(&pdev->dev,
-			"No APM crossover corner programmed for L3\n");
+		rc = clk_osm_parse_acd_dt_configs(pdev);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
+			return rc;
+		}
 
-	rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
-	if (rc)
-		dev_info(&pdev->dev,
-			"No APM crossover corner programmed for pwrcl_clk\n");
+		rc = clk_osm_acd_resources_init(pdev);
+		if (rc) {
+			dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
+				rc);
+			return rc;
+		}
 
-	rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
-	if (rc)
-		dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+		rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
+		if (rc) {
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
+				rc);
+			return rc;
+		}
+		rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+		if (rc) {
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+				rc);
+			return rc;
+		}
+		rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+		if (rc) {
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+				rc);
+			return rc;
+		}
 
-	clk_osm_setup_cycle_counters(&l3_clk);
-	clk_osm_setup_cycle_counters(&pwrcl_clk);
-	clk_osm_setup_cycle_counters(&perfcl_clk);
+		rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
+		if (rc)
+			dev_info(&pdev->dev,
+				"No APM crossover corner programmed for L3\n");
+		rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
+		if (rc)
+			dev_info(&pdev->dev,
+				"No APM crossover corner programmed for pwrcl_clk\n");
+		rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
+		if (rc)
+			dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
 
-	clk_osm_print_osm_table(&l3_clk);
-	clk_osm_print_osm_table(&pwrcl_clk);
-	clk_osm_print_osm_table(&perfcl_clk);
+		clk_osm_setup_cycle_counters(&l3_clk);
+		clk_osm_setup_cycle_counters(&pwrcl_clk);
+		clk_osm_setup_cycle_counters(&perfcl_clk);
 
-	rc = clk_osm_setup_hw_table(&l3_clk);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
-		goto exit;
-	}
-	rc = clk_osm_setup_hw_table(&pwrcl_clk);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
-		goto exit;
-	}
-	rc = clk_osm_setup_hw_table(&perfcl_clk);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
-		goto exit;
-	}
+		clk_osm_print_osm_table(&l3_clk);
+		clk_osm_print_osm_table(&pwrcl_clk);
+		clk_osm_print_osm_table(&perfcl_clk);
 
-	/* Policy tuning */
-	rc = clk_osm_set_cc_policy(pdev);
-	if (rc < 0) {
-		dev_err(&pdev->dev, "cc policy setup failed");
-		goto exit;
-	}
+		rc = clk_osm_setup_hw_table(&l3_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
+			goto exit;
+		}
+		rc = clk_osm_setup_hw_table(&pwrcl_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+			goto exit;
+		}
+		rc = clk_osm_setup_hw_table(&perfcl_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+			goto exit;
+		}
 
-	/* LLM Freq Policy Tuning */
-	rc = clk_osm_set_llm_freq_policy(pdev);
-	if (rc < 0) {
-		dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
-		goto exit;
-	}
+		/* Policy tuning */
+		rc = clk_osm_set_cc_policy(pdev);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "cc policy setup failed");
+			goto exit;
+		}
 
-	/* LLM Voltage Policy Tuning */
-	rc = clk_osm_set_llm_volt_policy(pdev);
-	if (rc < 0) {
-		dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
-		goto exit;
-	}
+		/* LLM Freq Policy Tuning */
+		rc = clk_osm_set_llm_freq_policy(pdev);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
+			goto exit;
+		}
 
-	clk_osm_setup_fsms(&l3_clk);
-	clk_osm_setup_fsms(&pwrcl_clk);
-	clk_osm_setup_fsms(&perfcl_clk);
+		/* LLM Voltage Policy Tuning */
+		rc = clk_osm_set_llm_volt_policy(pdev);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
+			goto exit;
+		}
 
-	/* Program VC at which the array power supply needs to be switched */
-	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
+		clk_osm_setup_fsms(&l3_clk);
+		clk_osm_setup_fsms(&pwrcl_clk);
+		clk_osm_setup_fsms(&perfcl_clk);
+
+		/*
+		 * Program the VC at which the array power supply
+		 * needs to be switched.
+		 */
+		clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
 				APM_CROSSOVER_VC, OSM_BASE);
-	if (perfcl_clk.secure_init) {
-		clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
-				DATA_MEM(77));
-		clk_osm_write_seq_reg(&perfcl_clk,
+		if (perfcl_clk.secure_init) {
+			clk_osm_write_seq_reg(&perfcl_clk,
+				perfcl_clk.apm_crossover_vc, DATA_MEM(77));
+			clk_osm_write_seq_reg(&perfcl_clk,
 				(0x39 | (perfcl_clk.apm_threshold_vc << 6)),
 				DATA_MEM(111));
-	} else {
-		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
-				perfcl_clk.apm_crossover_vc);
-		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(111),
+		} else {
+			scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
+					perfcl_clk.apm_crossover_vc);
+			scm_io_write(perfcl_clk.pbases[SEQ_BASE] +
+								DATA_MEM(111),
 				(0x39 | (perfcl_clk.apm_threshold_vc << 6)));
-	}
+		}
 
-	/*
-	 * Perform typical secure-world HW initialization
-	 * as necessary.
-	 */
-	clk_osm_do_additional_setup(&l3_clk, pdev);
-	clk_osm_do_additional_setup(&pwrcl_clk, pdev);
-	clk_osm_do_additional_setup(&perfcl_clk, pdev);
+		/*
+		 * Perform typical secure-world HW initialization
+		 * as necessary.
+		 */
+		clk_osm_do_additional_setup(&l3_clk, pdev);
+		clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+		clk_osm_do_additional_setup(&perfcl_clk, pdev);
 
-	/* MEM-ACC Programming */
-	clk_osm_program_mem_acc_regs(&l3_clk);
-	clk_osm_program_mem_acc_regs(&pwrcl_clk);
-	clk_osm_program_mem_acc_regs(&perfcl_clk);
+		/* MEM-ACC Programming */
+		clk_osm_program_mem_acc_regs(&l3_clk);
+		clk_osm_program_mem_acc_regs(&pwrcl_clk);
+		clk_osm_program_mem_acc_regs(&perfcl_clk);
 
-	if (of_property_read_bool(pdev->dev.of_node, "qcom,osm-pll-setup")) {
-		clk_osm_setup_cluster_pll(&l3_clk);
-		clk_osm_setup_cluster_pll(&pwrcl_clk);
-		clk_osm_setup_cluster_pll(&perfcl_clk);
-	}
+		if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,osm-pll-setup")) {
+			clk_osm_setup_cluster_pll(&l3_clk);
+			clk_osm_setup_cluster_pll(&pwrcl_clk);
+			clk_osm_setup_cluster_pll(&perfcl_clk);
+		}
 
-	/* Misc programming */
-	clk_osm_misc_programming(&l3_clk);
-	clk_osm_misc_programming(&pwrcl_clk);
-	clk_osm_misc_programming(&perfcl_clk);
+		/* Misc programming */
+		clk_osm_misc_programming(&l3_clk);
+		clk_osm_misc_programming(&pwrcl_clk);
+		clk_osm_misc_programming(&perfcl_clk);
 
-	pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
+		rc = clk_osm_acd_init(&l3_clk);
+		if (rc) {
+			pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
+			goto exit;
+		}
+		rc = clk_osm_acd_init(&pwrcl_clk);
+		if (rc) {
+			pr_err("failed to initialize ACD for pwrcl, rc=%d\n",
+									rc);
+			goto exit;
+		}
+		rc = clk_osm_acd_init(&perfcl_clk);
+		if (rc) {
+			pr_err("failed to initialize ACD for perfcl, rc=%d\n",
+									rc);
+			goto exit;
+		}
+
+		pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
 			of_property_read_bool(pdev->dev.of_node,
 				"qcom,enable-per-core-dcvs");
-	if (pwrcl_clk.per_core_dcvs) {
+		if (pwrcl_clk.per_core_dcvs) {
+			val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
+			val |= BIT(0);
+			clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL,
+							OSM_BASE);
+			val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
+			val |= BIT(0);
+			clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL,
+							OSM_BASE);
+		}
+	} else {
+		/* OSM has been enabled already by trustzone */
+		rc = clk_osm_read_lut(pdev, &l3_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to read OSM LUT for L3, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = clk_osm_read_lut(pdev, &pwrcl_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to read OSM LUT for power cluster, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = clk_osm_read_lut(pdev, &perfcl_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to read OSM LUT for perf cluster, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Check whether per-core DCVS is enabled */
 		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
-		val |= BIT(0);
-		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
+		if (val & BIT(0))
+			pwrcl_clk.per_core_dcvs = true;
 
 		val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
-		val |= BIT(0);
-		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
-	}
+		if (val & BIT(0))
+			perfcl_clk.per_core_dcvs = true;
 
-	rc = clk_osm_acd_init(&l3_clk);
-	if (rc) {
-		pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
-		goto exit;
-	}
-	rc = clk_osm_acd_init(&pwrcl_clk);
-	if (rc) {
-		pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc);
-		goto exit;
-	}
-	rc = clk_osm_acd_init(&perfcl_clk);
-	if (rc) {
-		pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc);
-		goto exit;
+		clk_ops_l3_osm.enable = NULL;
 	}
 
 	spin_lock_init(&l3_clk.lock);
@@ -3290,7 +3374,23 @@
 
 	get_online_cpus();
 
-	/* Set the L3 clock to run off GPLL0 and enable OSM for the domain */
+	if (!osm_tz_enabled) {
+		populate_debugfs_dir(&l3_clk);
+		populate_debugfs_dir(&pwrcl_clk);
+		populate_debugfs_dir(&perfcl_clk);
+
+		/* Configure default rate to lowest frequency */
+		for (i = 0; i < MAX_CORE_COUNT; i++) {
+			osm_set_index(&pwrcl_clk, 0, i);
+			osm_set_index(&perfcl_clk, 0, i);
+		}
+	}
+	/*
+	 * Set the L3 clock to run off GPLL0 and enable OSM for the domain.
+	 * In the case that trustzone has already enabled OSM, bring the L3
+	 * clock rate to a safe level until the devfreq driver comes up and
+	 * votes for its desired frequency.
+	 */
 	rc = clk_set_rate(l3_clk.hw.clk, OSM_INIT_RATE);
 	if (rc) {
 		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
@@ -3298,21 +3398,12 @@
 		goto provider_err;
 	}
 	WARN(clk_prepare_enable(l3_cluster0_vote_clk.hw.clk),
-		     "clk: Failed to enable cluster0 clock for L3\n");
+			"clk: Failed to enable cluster0 clock for L3\n");
 	WARN(clk_prepare_enable(l3_cluster1_vote_clk.hw.clk),
-		     "clk: Failed to enable cluster1 clock for L3\n");
+			"clk: Failed to enable cluster1 clock for L3\n");
 	udelay(300);
 
-	/* Configure default rate to lowest frequency */
-	for (i = 0; i < MAX_CORE_COUNT; i++) {
-		osm_set_index(&pwrcl_clk, 0, i);
-		osm_set_index(&perfcl_clk, 0, i);
-	}
-
 	populate_opp_table(pdev);
-	populate_debugfs_dir(&l3_clk);
-	populate_debugfs_dir(&pwrcl_clk);
-	populate_debugfs_dir(&perfcl_clk);
 
 	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	register_cpu_cycle_counter_cb(&cb);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index dd69b31..7382cfa 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -138,9 +138,6 @@
 	int ret;
 	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
 
-	if (rcg->flags & DFS_ENABLE_RCG)
-		return 0;
-
 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
 				 CFG_SRC_SEL_MASK, cfg);
 	if (ret)
@@ -350,8 +347,9 @@
 	struct clk_hw *hw = &rcg->clkr.hw;
 	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
 
+	/* Skip configuration if DFS control has been enabled for the RCG. */
 	if (rcg->flags & DFS_ENABLE_RCG)
-		return -EPERM;
+		return 0;
 
 	if (index < 0)
 		return index;
@@ -481,7 +479,7 @@
 	}
 
 	ret = clk_rcg2_configure(rcg, f);
-	if (ret && ret != -EPERM)
+	if (ret)
 		return ret;
 
 	if (rcg->flags & FORCE_ENABLE_RCG) {
@@ -1357,7 +1355,9 @@
 		"RCG flags %x\n", i, dfs_freq_tbl[i].freq, dfs_freq_tbl[i].src,
 				dfs_freq_tbl[i].pre_div, dfs_freq_tbl[i].m,
 				dfs_freq_tbl[i].n, rcg_flags);
-
+	/* Skip the safe configuration if DFS has been enabled for the RCG. */
+	if (clk->enable_safe_config)
+		clk->enable_safe_config = false;
 	clk->flags |= rcg_flags;
 	clk->freq_tbl = dfs_freq_tbl;
 err:
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index e11ea50..8286818 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1031,9 +1031,7 @@
 	if (predicted && (idx < (cluster->nlevels - 1))) {
 		struct power_params *pwr_params = &cluster->levels[idx].pwr;
 
-		tick_broadcast_exit();
 		clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
-		tick_broadcast_enter();
 	}
 
 	return 0;
@@ -1086,10 +1084,8 @@
 			struct power_params *pwr_params =
 						&cluster->levels[0].pwr;
 
-			tick_broadcast_exit();
 			clusttimer_start(cluster,
 					pwr_params->max_residency + tmr_add);
-			tick_broadcast_enter();
 		}
 	}
 
@@ -1196,9 +1192,6 @@
 	 * next wakeup within a cluster, in which case, CPU switches over to
 	 * use broadcast timer.
 	 */
-	if (from_idle && cpu_level->use_bc_timer)
-		tick_broadcast_enter();
-
 	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
 		|| (cpu_level->mode ==
 			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
@@ -1218,9 +1211,6 @@
 	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
 	bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;
 
-	if (from_idle && cpu_level->use_bc_timer)
-		tick_broadcast_exit();
-
 	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
 		|| (cpu_level->mode ==
 			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
@@ -1272,6 +1262,11 @@
 	/*
 	 * idx = 0 is the default LPM state
 	 */
+	if (from_idle && cpu->levels[idx].use_bc_timer) {
+		if (tick_broadcast_enter())
+			return false;
+	}
+
 	if (!idx) {
 		stop_critical_timings();
 		wfi();
@@ -1290,6 +1285,10 @@
 	start_critical_timings();
 	update_debug_pc_event(CPU_EXIT, state_id,
 			success, 0xdeaffeed, true);
+
+	if (from_idle && cpu->levels[idx].use_bc_timer)
+		tick_broadcast_exit();
+
 	return success;
 }
 
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index f5bb3ed..5ca93a6 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#include <linux/cpu_pm.h>
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
 
@@ -125,6 +126,7 @@
 struct erp_drvdata {
 	struct edac_device_ctl_info *edev_ctl;
 	struct erp_drvdata __percpu **erp_cpu_drvdata;
+	struct notifier_block nb_pm;
 	int ppi;
 };
 
@@ -358,6 +360,19 @@
 	return IRQ_HANDLED;
 }
 
+static int kryo3xx_pmu_cpu_pm_notify(struct notifier_block *self,
+				unsigned long action, void *v)
+{
+	switch (action) {
+	case CPU_PM_EXIT:
+		kryo3xx_check_l3_scu_error(panic_handler_drvdata->edev_ctl);
+		kryo3xx_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 static void initialize_registers(void *info)
 {
 	set_errxctlr_el1();
@@ -400,6 +415,7 @@
 	drv->edev_ctl->ctl_name = "cache";
 	drv->edev_ctl->panic_on_ce = ARM64_ERP_PANIC_ON_CE;
 	drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
+	drv->nb_pm.notifier_call = kryo3xx_pmu_cpu_pm_notify;
 	platform_set_drvdata(pdev, drv);
 
 	rc = edac_device_add_device(drv->edev_ctl);
@@ -424,6 +440,8 @@
 		goto out_dev;
 	}
 
+	cpu_pm_register_notifier(&(drv->nb_pm));
+
 	return 0;
 
 out_dev:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index d97e4ef..f05d760 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -617,7 +617,7 @@
 			goto fail;
 		}
 
-		aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+		aspace = msm_gem_smmu_address_space_create(dev,
 				mmu, "mdp5");
 		if (IS_ERR(aspace)) {
 			ret = PTR_ERR(aspace);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d2ac684..33ef04b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -121,13 +121,37 @@
 		msm_drm_helper_hotplug_event(dev);
 }
 
+/**
+ * msm_atomic_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This is a wrapper around drm_atomic_helper_check that performs the modeset
+ * and plane state checks. Additionally, it checks whether any secure
+ * transition (moving CRTCs and planes between secure and non-secure states
+ * and vice versa) is allowed. When going to a secure state, only planes with
+ * fb_mode set to dir-translated can be staged on the CRTC, only one CRTC may
+ * be active, and mixing of secure and non-secure planes is not allowed.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
 int msm_atomic_check(struct drm_device *dev,
 			    struct drm_atomic_state *state)
 {
+	struct msm_drm_private *priv;
+
 	if (msm_is_suspend_blocked(dev)) {
 		DRM_DEBUG("rejecting commit during suspend\n");
 		return -EBUSY;
 	}
+
+	priv = dev->dev_private;
+	if (priv && priv->kms && priv->kms->funcs &&
+			priv->kms->funcs->atomic_check)
+		return priv->kms->funcs->atomic_check(priv->kms, state);
+
 	return drm_atomic_helper_check(dev, state);
 }
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 96ab883..5b8a6b8 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -662,13 +662,57 @@
 
 /* For SDE  display */
 struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
 		const char *name);
 
+/**
+ * msm_gem_add_obj_to_aspace_active_list: adds obj to active obj list in aspace
+ */
+void msm_gem_add_obj_to_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj);
+
+/**
+ * msm_gem_remove_obj_from_aspace_active_list: removes obj from active obj
+ * list in aspace
+ */
+void msm_gem_remove_obj_from_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj);
+
+/**
+ * msm_gem_smmu_address_space_get: returns the aspace pointer for the requested
+ * domain
+ */
 struct msm_gem_address_space *
 msm_gem_smmu_address_space_get(struct drm_device *dev,
 		unsigned int domain);
 
+/**
+ * msm_gem_aspace_domain_attach_detach_update: informs the aspace of an
+ * attach/detach of its domain and updates its mapped objects accordingly
+ */
+void msm_gem_aspace_domain_attach_detach_update(
+		struct msm_gem_address_space *aspace,
+		bool is_detach);
+
+/**
+ * msm_gem_address_space_register_cb: function to register callback for attach
+ * and detach of the domain
+ */
+int msm_gem_address_space_register_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data);
+
+/**
+ * msm_gem_address_space_unregister_cb: function to unregister callback
+ */
+int msm_gem_address_space_unregister_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data);
+
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a7d06d1..d64dcc6 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -311,6 +311,10 @@
 		if (iommu_present(&platform_bus_type)) {
 			msm_gem_unmap_vma(domain->aspace, domain,
 				msm_obj->sgt, get_dmabuf_ptr(obj));
+
+			msm_gem_remove_obj_from_aspace_active_list(
+					domain->aspace,
+					obj);
 		}
 
 		obj_remove_domain(domain);
@@ -390,10 +394,12 @@
 			msm_obj->flags);
 	}
 
-	if (!ret && domain)
+	if (!ret && domain) {
 		*iova = domain->iova;
-	else
+		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
+	} else {
 		obj_remove_domain(domain);
+	}
 
 	return ret;
 }
@@ -441,6 +447,63 @@
 	// things that are no longer needed..
 }
 
+void msm_gem_aspace_domain_attach_detach_update(
+		struct msm_gem_address_space *aspace,
+		bool is_detach)
+{
+	struct msm_gem_object *msm_obj;
+	struct drm_gem_object *obj;
+	struct aspace_client *aclient;
+	int ret;
+	uint32_t iova;
+
+	if (!aspace)
+		return;
+
+	mutex_lock(&aspace->dev->struct_mutex);
+	if (is_detach) {
+		/* Indicate to clients domain is getting detached */
+		list_for_each_entry(aclient, &aspace->clients, list) {
+			if (aclient->cb)
+				aclient->cb(aclient->cb_data,
+						is_detach);
+		}
+
+		/*
+		 * Unmap active buffers. Clients should typically do this
+		 * when the callback is invoked, but it must also be done
+		 * here for framebuffers that are not attached to any plane
+		 * (e.g. background apps).
+		 */
+		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
+			obj = &msm_obj->base;
+			if (obj->import_attach) {
+				put_iova(obj);
+				put_pages(obj);
+			}
+		}
+	} else {
+		/* map active buffers */
+		list_for_each_entry(msm_obj, &aspace->active_list,
+				iova_list) {
+			obj = &msm_obj->base;
+			ret = msm_gem_get_iova_locked(obj, aspace, &iova);
+			if (ret) {
+				mutex_unlock(&obj->dev->struct_mutex);
+				return;
+			}
+		}
+
+		/* Indicate to clients domain is attached */
+		list_for_each_entry(aclient, &aspace->clients, list) {
+			if (aclient->cb)
+				aclient->cb(aclient->cb_data,
+						is_detach);
+		}
+	}
+	mutex_unlock(&aspace->dev->struct_mutex);
+}
+
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args)
 {
@@ -869,6 +932,7 @@
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->domains);
+	INIT_LIST_HEAD(&msm_obj->iova_list);
 
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9d41a00..c50c453 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -25,6 +25,8 @@
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 #define MSM_BO_KEEPATTRS     0x20000000     /* keep h/w bus attributes */
 
+struct msm_gem_object;
+
 struct msm_gem_aspace_ops {
 	int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
 		struct sg_table *sgt, void *priv, unsigned int flags);
@@ -33,12 +35,35 @@
 		struct sg_table *sgt, void *priv);
 
 	void (*destroy)(struct msm_gem_address_space *);
+	void (*add_to_active)(struct msm_gem_address_space *,
+		struct msm_gem_object *);
+	void (*remove_from_active)(struct msm_gem_address_space *,
+		struct msm_gem_object *);
+	int (*register_cb)(struct msm_gem_address_space *,
+			void (*cb)(void *, bool),
+			void *);
+	int (*unregister_cb)(struct msm_gem_address_space *,
+			void (*cb)(void *, bool),
+			void *);
 };
 
+struct aspace_client {
+	void (*cb)(void *, bool);
+	void *cb_data;
+	struct list_head list;
+};
+
+
 	const char *name;
 	struct msm_mmu *mmu;
 	const struct msm_gem_aspace_ops *ops;
+	bool domain_attached;
+	struct drm_device *dev;
+	/* list of mapped objects */
+	struct list_head active_list;
+	/* list of clients */
+	struct list_head clients;
 };
 
 struct msm_gem_vma {
@@ -96,6 +121,7 @@
 	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
+	struct list_head iova_list;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 8e56871..d02228a 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -44,6 +44,9 @@
 	struct dma_buf *buf = priv;
 	int ret;
 
+	if (!aspace || !aspace->domain_attached)
+		return -EINVAL;
+
 	if (buf)
 		ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
 			DMA_BIDIRECTIONAL, flags);
@@ -62,15 +65,109 @@
 	aspace->mmu->funcs->destroy(aspace->mmu);
 }
 
+static void smmu_aspace_add_to_active(
+		struct msm_gem_address_space *aspace,
+		struct msm_gem_object *msm_obj)
+{
+	WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
+	list_move_tail(&msm_obj->iova_list, &aspace->active_list);
+}
+
+static void smmu_aspace_remove_from_active(
+		struct msm_gem_address_space *aspace,
+		struct msm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj, *next;
+
+	WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
+
+	list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
+			iova_list) {
+		if (msm_obj == obj) {
+			list_del(&msm_obj->iova_list);
+			break;
+		}
+	}
+}
+
+static int smmu_aspace_register_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	struct aspace_client *aclient = NULL;
+	struct aspace_client *temp;
+
+	if (!aspace)
+		return -EINVAL;
+
+	if (!aspace->domain_attached)
+		return -EACCES;
+
+	aclient = kzalloc(sizeof(*aclient), GFP_KERNEL);
+	if (!aclient)
+		return -ENOMEM;
+
+	aclient->cb = cb;
+	aclient->cb_data = cb_data;
+	INIT_LIST_HEAD(&aclient->list);
+
+	/* check if callback is already registered */
+	mutex_lock(&aspace->dev->struct_mutex);
+	list_for_each_entry(temp, &aspace->clients, list) {
+		if ((temp->cb == aclient->cb) &&
+			(temp->cb_data == aclient->cb_data)) {
+			kfree(aclient);
+			mutex_unlock(&aspace->dev->struct_mutex);
+			return -EEXIST;
+		}
+	}
+
+	list_move_tail(&aclient->list, &aspace->clients);
+	mutex_unlock(&aspace->dev->struct_mutex);
+
+	return 0;
+}
+
+static int smmu_aspace_unregister_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	struct aspace_client *aclient = NULL;
+	int rc = -ENOENT;
+
+	if (!aspace || !cb)
+		return -EINVAL;
+
+	mutex_lock(&aspace->dev->struct_mutex);
+	list_for_each_entry(aclient, &aspace->clients, list) {
+		if ((aclient->cb == cb) &&
+			(aclient->cb_data == cb_data)) {
+			list_del(&aclient->list);
+			kfree(aclient);
+			rc = 0;
+			break;
+		}
+	}
+	mutex_unlock(&aspace->dev->struct_mutex);
+
+	return rc;
+}
+
 
 static const struct msm_gem_aspace_ops smmu_aspace_ops = {
 	.map = smmu_aspace_map_vma,
 	.unmap = smmu_aspace_unmap_vma,
-	.destroy = smmu_aspace_destroy
+	.destroy = smmu_aspace_destroy,
+	.add_to_active = smmu_aspace_add_to_active,
+	.remove_from_active = smmu_aspace_remove_from_active,
+	.register_cb = smmu_aspace_register_cb,
+	.unregister_cb = smmu_aspace_unregister_cb,
 };
 
 struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
 		const char *name)
 {
 	struct msm_gem_address_space *aspace;
@@ -82,9 +179,12 @@
 	if (!aspace)
 		return ERR_PTR(-ENOMEM);
 
+	aspace->dev = dev;
 	aspace->name = name;
 	aspace->mmu = mmu;
 	aspace->ops = &smmu_aspace_ops;
+	INIT_LIST_HEAD(&aspace->active_list);
+	INIT_LIST_HEAD(&aspace->clients);
 
 	return aspace;
 }
@@ -218,3 +318,44 @@
 
 	kfree(aspace);
 }
+
+void msm_gem_add_obj_to_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (aspace && aspace->ops && aspace->ops->add_to_active)
+		aspace->ops->add_to_active(aspace, msm_obj);
+}
+
+void msm_gem_remove_obj_from_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (aspace && aspace->ops && aspace->ops->remove_from_active)
+		aspace->ops->remove_from_active(aspace, msm_obj);
+}
+
+int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	if (aspace && aspace->ops && aspace->ops->register_cb)
+		return aspace->ops->register_cb(aspace, cb, cb_data);
+
+	return -EINVAL;
+}
+
+int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	if (aspace && aspace->ops && aspace->ops->unregister_cb)
+		return aspace->ops->unregister_cb(aspace, cb, cb_data);
+
+	return -EINVAL;
+}
+
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 7692bef..0375979 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -75,6 +75,9 @@
 			const struct msm_format *msm_fmt,
 			const struct drm_mode_fb_cmd2 *cmd,
 			struct drm_gem_object **bos);
+	/* perform complete atomic check of given atomic state */
+	int (*atomic_check)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
 	/* misc: */
 	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
 			struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 5af26e2..08e6f79 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -45,6 +45,7 @@
 	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
 			struct dma_buf *dma_buf, int dir);
 	void (*destroy)(struct msm_mmu *mmu);
+	bool (*is_domain_secure)(struct msm_mmu *mmu);
 };
 
 struct msm_mmu {
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 7d7f74a..730fc06 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -37,6 +37,7 @@
 	struct device *dev;
 	struct dma_iommu_mapping *mmu_mapping;
 	bool domain_attached;
+	bool secure;
 };
 
 struct msm_smmu {
@@ -275,6 +276,14 @@
 	msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
 }
 
+static bool msm_smmu_is_domain_secure(struct msm_mmu *mmu)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+	return client->secure;
+}
+
 static const struct msm_mmu_funcs funcs = {
 	.attach = msm_smmu_attach,
 	.detach = msm_smmu_detach,
@@ -285,6 +294,7 @@
 	.map_dma_buf = msm_smmu_map_dma_buf,
 	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
 	.destroy = msm_smmu_destroy,
+	.is_domain_secure = msm_smmu_is_domain_secure,
 };
 
 static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
@@ -458,6 +468,7 @@
 	if (domain->secure) {
 		int secure_vmid = VMID_CP_PIXEL;
 
+		client->secure = true;
 		rc = iommu_domain_set_attr(client->mmu_mapping->domain,
 				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
 		if (rc) {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 75dfdc0..935dc12 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -2711,6 +2711,130 @@
 	return rc;
 }
 
+static int _sde_crtc_find_plane_fb_modes(struct drm_crtc_state *state,
+		uint32_t *fb_ns,
+		uint32_t *fb_sec,
+		uint32_t *fb_ns_dir,
+		uint32_t *fb_sec_dir)
+{
+	struct drm_plane *plane;
+	const struct drm_plane_state *pstate;
+	struct sde_plane_state *sde_pstate;
+	uint32_t mode = 0;
+	int rc;
+
+	if (!state) {
+		SDE_ERROR("invalid state\n");
+		return -EINVAL;
+	}
+
+	*fb_ns = 0;
+	*fb_sec = 0;
+	*fb_ns_dir = 0;
+	*fb_sec_dir = 0;
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+		if (IS_ERR_OR_NULL(pstate)) {
+			rc = PTR_ERR(pstate);
+			SDE_ERROR("crtc%d failed to get plane%d state, rc %d\n",
+					state->crtc->base.id,
+					plane->base.id, rc);
+			return rc;
+		}
+		sde_pstate = to_sde_plane_state(pstate);
+		mode = sde_plane_get_property(sde_pstate,
+				PLANE_PROP_FB_TRANSLATION_MODE);
+		switch (mode) {
+		case SDE_DRM_FB_NON_SEC:
+			(*fb_ns)++;
+			break;
+		case SDE_DRM_FB_SEC:
+			(*fb_sec)++;
+			break;
+		case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+			(*fb_ns_dir)++;
+			break;
+		case SDE_DRM_FB_SEC_DIR_TRANS:
+			(*fb_sec_dir)++;
+			break;
+		default:
+			SDE_ERROR("Error: Plane[%d], fb_trans_mode:%d\n",
+					plane->base.id,
+					mode);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct drm_encoder *encoder;
+	struct sde_crtc_state *cstate;
+	uint32_t secure;
+	uint32_t fb_ns = 0, fb_sec = 0, fb_ns_dir = 0, fb_sec_dir = 0;
+	int encoder_cnt = 0;
+	int rc;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	cstate = to_sde_crtc_state(state);
+
+	secure = sde_crtc_get_property(cstate,
+			CRTC_PROP_SECURITY_LEVEL);
+
+	rc = _sde_crtc_find_plane_fb_modes(state,
+			&fb_ns,
+			&fb_sec,
+			&fb_ns_dir,
+			&fb_sec_dir);
+	if (rc)
+		return rc;
+
+	/*
+	 * Validate planes:
+	 * fb_ns_dir is for the secure display use case,
+	 * fb_sec_dir is for the secure camera preview use case,
+	 * fb_sec is for secure video playback,
+	 * fb_ns is for normal, non-secure use cases.
+	 */
+	if (((secure == SDE_DRM_SEC_ONLY) &&
+				(fb_ns || fb_sec || fb_sec_dir)) ||
+			(fb_sec || fb_sec_dir)) {
+		SDE_ERROR(
+			"crtc%d: invalid planes fb_modes Sec:%d, NS:%d, Sec_Dir:%d, NS_Dir:%d\n",
+				crtc->base.id,
+				fb_sec, fb_ns, fb_sec_dir,
+				fb_ns_dir);
+		return -EINVAL;
+	}
+
+	/*
+	 * A secure CRTC is not allowed in a topology shared
+	 * across different encoders.
+	 */
+	if (fb_ns_dir || fb_sec_dir) {
+		drm_for_each_encoder(encoder, crtc->dev)
+			if (encoder->crtc ==  crtc)
+				encoder_cnt++;
+
+		if (encoder_cnt >
+			MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC) {
+			SDE_ERROR(
+				"crtc%d, invalid virtual encoder count %d\n",
+				crtc->base.id,
+				encoder_cnt);
+			return -EINVAL;
+		}
+	}
+	SDE_DEBUG("crtc:%d Secure validation successful\n", crtc->base.id);
+	return 0;
+}
+
 static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -2757,6 +2881,10 @@
 	_sde_crtc_setup_is_ppsplit(state);
 	_sde_crtc_setup_lm_bounds(crtc, state);
 
+	rc = _sde_crtc_check_secure_state(crtc, state);
+	if (rc)
+		return rc;
+
 	 /* get plane state for all drm planes associated with crtc state */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
 		if (IS_ERR_OR_NULL(pstate)) {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 84f9ce1..439aeac 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -492,4 +492,21 @@
 void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
 		const struct sde_rect **crtc_roi);
 
+/**
+ * sde_crtc_get_secure_level - retrieve the secure level from the given state
+ *	object; used to determine the secure state of the crtc
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to drm crtc state
+ * Return: secure_level
+ */
+static inline int sde_crtc_get_secure_level(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	if (!crtc || !state)
+		return -EINVAL;
+
+	return sde_crtc_get_property(to_sde_crtc_state(state),
+			CRTC_PROP_SECURITY_LEVEL);
+}
+
 #endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index b91bb87..7170d55 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -315,6 +315,7 @@
  * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
  * @pending_rd_ptr_cnt: atomic counter to indicate if retire fence can be
  *                      signaled at the next rd_ptr_irq
+ * @rd_ptr_timestamp: last rd_ptr_irq timestamp
  * @autorefresh: autorefresh feature state
  */
 struct sde_encoder_phys_cmd {
@@ -324,6 +325,7 @@
 	int pp_timeout_report_cnt;
 	struct sde_encoder_phys_cmd_autorefresh autorefresh;
 	atomic_t pending_rd_ptr_cnt;
+	ktime_t rd_ptr_timestamp;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index a616e81..ad00a7f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -47,6 +47,12 @@
 
 #define SDE_ENC_WR_PTR_START_TIMEOUT_US 20000
 
+/*
+ * Threshold for signalling retire fences in cases where
+ * CTL_START_IRQ is received just after RD_PTR_IRQ
+ */
+#define SDE_ENC_CTL_START_THRESHOLD_US 500
+
 static inline int _sde_encoder_phys_cmd_get_idle_timeout(
 		struct sde_encoder_phys_cmd *cmd_enc)
 {
@@ -212,7 +218,7 @@
 {
 	struct sde_encoder_phys *phys_enc = arg;
 	struct sde_encoder_phys_cmd *cmd_enc;
-	bool signal_fence = false;
+	u32 event = 0;
 
 	if (!phys_enc || !phys_enc->hw_pp)
 		return;
@@ -221,48 +227,29 @@
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 
 	/**
-	 * signal only for master,
-	 * - when the ctl_start irq is done and incremented
-	 *   the pending_rd_ptr_cnt.
-	 * - when ctl_start irq status bit is set. This handles the case
-	 *   where ctl_start status bit is set in hardware, but the interrupt
-	 *   is delayed due to some reason.
+	 * Signal only for the master, once the ctl_start irq has been
+	 * handled and has incremented pending_rd_ptr_cnt.
 	 */
-	if (sde_encoder_phys_cmd_is_master(phys_enc) &&
-			atomic_read(&phys_enc->pending_retire_fence_cnt)) {
+	if (sde_encoder_phys_cmd_is_master(phys_enc)
+		    && atomic_add_unless(&cmd_enc->pending_rd_ptr_cnt, -1, 0)
+		    && atomic_add_unless(
+				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
 
-		if (atomic_add_unless(
-				&cmd_enc->pending_rd_ptr_cnt, -1, 0)) {
-			signal_fence = true;
-		} else {
-			signal_fence =
-				sde_core_irq_read_nolock(phys_enc->sde_kms,
-				    phys_enc->irq[INTR_IDX_CTL_START].irq_idx,
-				    false);
-			if (signal_fence)
-				SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-				    phys_enc->hw_pp->idx - PINGPONG_0,
-				    atomic_read(
-					&phys_enc->pending_retire_fence_cnt),
-				    SDE_EVTLOG_FUNC_CASE1);
-		}
-
-		if (signal_fence && phys_enc->parent_ops.handle_frame_done) {
-			atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0);
+		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+		if (phys_enc->parent_ops.handle_frame_done)
 			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-		}
+				phys_enc->parent, phys_enc, event);
 	}
 
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0, signal_fence, 0xfff);
+			phys_enc->hw_pp->idx - PINGPONG_0, event, 0xfff);
 
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 			phys_enc);
 
+	cmd_enc->rd_ptr_timestamp = ktime_get();
+
 	SDE_ATRACE_END("rd_ptr_irq");
 }
 
@@ -271,6 +258,8 @@
 	struct sde_encoder_phys *phys_enc = arg;
 	struct sde_encoder_phys_cmd *cmd_enc;
 	struct sde_hw_ctl *ctl;
+	u32 event = 0;
+	s64 time_diff_us;
 
 	if (!phys_enc || !phys_enc->hw_ctl)
 		return;
@@ -279,16 +268,41 @@
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 
 	ctl = phys_enc->hw_ctl;
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
 	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
 
-	/*
-	 * this is required for the fence signalling to be done in rd_ptr_irq
-	 * after ctrl_start_irq
-	 */
+	time_diff_us = ktime_us_delta(ktime_get(), cmd_enc->rd_ptr_timestamp);
+
+	/* handle retire fence based on only master */
 	if (sde_encoder_phys_cmd_is_master(phys_enc)
-			&& atomic_read(&phys_enc->pending_retire_fence_cnt))
-		atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
+			&& atomic_read(&phys_enc->pending_retire_fence_cnt)) {
+		/**
+		 * Handle rare cases where the ctl_start_irq is received
+		 * after rd_ptr_irq. If it falls within a threshold, it is
+		 * guaranteed the frame would be picked up in the current TE.
+		 * Signal retire fence immediately in such case.
+		 */
+		if ((time_diff_us <= SDE_ENC_CTL_START_THRESHOLD_US)
+			    && atomic_add_unless(
+				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
+
+			event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+
+			if (phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc, event);
+
+		/**
+		 * In ideal cases, ctl_start_irq is received before the
+		 * rd_ptr_irq, so set the atomic flag to indicate the event
+		 * and rd_ptr_irq will handle signalling the retire fence
+		 */
+		} else {
+			atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
+		}
+	}
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0,
+				time_diff_us, event, 0xfff);
 
 	/* Signal any waiting ctl start interrupt */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index c95fb47..2b736e5 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -918,10 +918,12 @@
  * @pixel_format:	DRM pixel format
  * @width:		Desired fb width
  * @height:		Desired fb height
+ * @pitch:		Desired fb pitch
  */
 static int _sde_encoder_phys_wb_init_internal_fb(
 		struct sde_encoder_phys_wb *wb_enc,
-		uint32_t pixel_format, uint32_t width, uint32_t height)
+		uint32_t pixel_format, uint32_t width,
+		uint32_t height, uint32_t pitch)
 {
 	struct drm_device *dev;
 	struct drm_framebuffer *fb;
@@ -951,9 +953,11 @@
 	mode_cmd.pixel_format = pixel_format;
 	mode_cmd.width = width;
 	mode_cmd.height = height;
+	mode_cmd.pitches[0] = pitch;
 
 	size = sde_format_get_framebuffer_size(pixel_format,
-			mode_cmd.width, mode_cmd.height, 0, 0);
+			mode_cmd.width, mode_cmd.height,
+			mode_cmd.pitches, NULL, 0);
 	if (!size) {
 		SDE_DEBUG("not creating zero size buffer\n");
 		return -EINVAL;
@@ -1314,7 +1318,7 @@
 
 	/* create internal buffer for disable logic */
 	if (_sde_encoder_phys_wb_init_internal_fb(wb_enc,
-				DRM_FORMAT_RGB888, 2, 1)) {
+				DRM_FORMAT_RGB888, 2, 1, 6)) {
 		SDE_ERROR("failed to init internal fb\n");
 		goto fail_wb_init;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 04c9e79..3acf4c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -701,7 +701,8 @@
 		const struct sde_format *fmt,
 		const uint32_t width,
 		const uint32_t height,
-		struct sde_hw_fmt_layout *layout)
+		struct sde_hw_fmt_layout *layout,
+		const uint32_t *pitches)
 {
 	int i;
 
@@ -751,6 +752,17 @@
 		}
 	}
 
+	/*
+	 * linear format: allow user-allocated pitches if they are greater
+	 * than the computed requirement.
+	 * ubwc format: pitch values are computed uniformly across
+	 * all the components based on ubwc specifications.
+	 */
+	for (i = 0; i < layout->num_planes && i < SDE_MAX_PLANES; ++i) {
+		if (pitches && layout->plane_pitch[i] < pitches[i])
+			layout->plane_pitch[i] = pitches[i];
+	}
+
 	for (i = 0; i < SDE_MAX_PLANES; i++)
 		layout->total_size += layout->plane_size[i];
 
@@ -761,7 +773,8 @@
 		const struct sde_format *fmt,
 		const uint32_t w,
 		const uint32_t h,
-		struct sde_hw_fmt_layout *layout)
+		struct sde_hw_fmt_layout *layout,
+		const uint32_t *pitches)
 {
 	if (!layout || !fmt) {
 		DRM_ERROR("invalid pointer\n");
@@ -776,7 +789,7 @@
 	if (SDE_FORMAT_IS_UBWC(fmt) || SDE_FORMAT_IS_TILE(fmt))
 		return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout);
 
-	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
+	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
 }
 
 int sde_format_get_block_size(const struct sde_format *fmt,
@@ -801,6 +814,7 @@
 		const uint32_t format,
 		const uint32_t width,
 		const uint32_t height,
+		const uint32_t *pitches,
 		const uint64_t *modifiers,
 		const uint32_t modifiers_len)
 {
@@ -811,7 +825,10 @@
 	if (!fmt)
 		return 0;
 
-	if (sde_format_get_plane_sizes(fmt, width, height, &layout))
+	if (!pitches)
+		return -EINVAL;
+
+	if (sde_format_get_plane_sizes(fmt, width, height, &layout, pitches))
 		layout.total_size = 0;
 
 	return layout.total_size;
@@ -917,7 +934,7 @@
 
 	/* Can now check the pitches given vs pitches expected */
 	for (i = 0; i < layout->num_planes; ++i) {
-		if (layout->plane_pitch[i] != fb->pitches[i]) {
+		if (layout->plane_pitch[i] > fb->pitches[i]) {
 			DRM_ERROR("plane %u expected pitch %u, fb %u\n",
 				i, layout->plane_pitch[i], fb->pitches[i]);
 			return -EINVAL;
@@ -959,7 +976,7 @@
 
 	/* Populate the plane sizes etc via get_format */
 	ret = sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
-			layout);
+			layout, fb->pitches);
 	if (ret)
 		return ret;
 
@@ -1063,7 +1080,7 @@
 	num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
 
 	ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
-			&layout);
+			&layout, cmd->pitches);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 2333a72..58065ab 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -64,6 +64,8 @@
  * @w:               width of the buffer
  * @h:               height of the buffer
  * @layout:          layout of the buffer
+ * @pitches:         optional array of size [SDE_MAX_PLANES] holding the
+ *		     caller-requested pitch for each plane
  *
  * Return: size of the buffer
  */
@@ -71,7 +73,8 @@
 		const struct sde_format *fmt,
 		const uint32_t w,
 		const uint32_t h,
-		struct sde_hw_fmt_layout *layout);
+		struct sde_hw_fmt_layout *layout,
+		const uint32_t *pitches);
 
 /**
  * sde_format_get_block_size - get block size of given format when
@@ -137,6 +140,8 @@
  * @format:            DRM pixel format
  * @width:             pixel width
  * @height:            pixel height
+ * @pitches:           array of size [SDE_MAX_PLANES] holding the requested
+ *		       pitch for each plane
  * @modifiers:         array to populate with drm modifiers, can be NULL
  * @modifiers_len:     length of modifers array
  *
@@ -146,6 +151,7 @@
 		const uint32_t format,
 		const uint32_t width,
 		const uint32_t height,
+		const uint32_t *pitches,
 		const uint64_t *modifiers,
 		const uint32_t modifiers_len);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 85af820..a502008 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -316,6 +316,7 @@
 	u32 chroma_samp, unpack, src_format;
 	u32 secure = 0, secure_bit_mask;
 	u32 opmode = 0;
+	u32 fast_clear = 0;
 	u32 op_mode_off, unpack_pat_off, format_off;
 	u32 idx;
 
@@ -385,10 +386,12 @@
 		SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
 			SDE_FETCH_CONFIG_RESET_VALUE |
 			ctx->mdp->highest_bank_bit << 18);
-		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version))
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
+			fast_clear = fmt->alpha_enable ? BIT(31) : 0;
 			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
-					BIT(31) | (ctx->mdp->ubwc_swizzle) |
+					fast_clear | (ctx->mdp->ubwc_swizzle) |
 					(ctx->mdp->highest_bank_bit << 4));
+		}
 	}
 
 	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 2c19ca7..94bbc99 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1135,7 +1135,7 @@
 	}
 
 	ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
-			&fbo->layout);
+			&fbo->layout, fbo->layout.plane_pitch);
 	if (ret) {
 		SDE_ERROR("failed to get plane sizes\n");
 		goto done;
@@ -1341,6 +1341,70 @@
 	sde_reg_dma_deinit();
 }
 
+int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
+{
+	int i;
+
+	if (!sde_kms)
+		return -EINVAL;
+
+	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		struct msm_mmu *mmu;
+		struct msm_gem_address_space *aspace = sde_kms->aspace[i];
+
+		if (!aspace)
+			continue;
+
+		mmu = sde_kms->aspace[i]->mmu;
+
+		if (secure_only &&
+			!aspace->mmu->funcs->is_domain_secure(mmu))
+			continue;
+
+		/* cleanup aspace before detaching */
+		msm_gem_aspace_domain_attach_detach_update(aspace, true);
+
+		SDE_DEBUG("Detaching domain:%d\n", i);
+		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+
+		aspace->domain_attached = false;
+	}
+
+	return 0;
+}
+
+int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
+{
+	int i;
+
+	if (!sde_kms)
+		return -EINVAL;
+
+	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		struct msm_mmu *mmu;
+		struct msm_gem_address_space *aspace = sde_kms->aspace[i];
+
+		if (!aspace)
+			continue;
+
+		mmu = sde_kms->aspace[i]->mmu;
+
+		if (secure_only &&
+			!aspace->mmu->funcs->is_domain_secure(mmu))
+			continue;
+
+		SDE_DEBUG("Attaching domain:%d\n", i);
+		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+
+		msm_gem_aspace_domain_attach_detach_update(aspace, false);
+		aspace->domain_attached = true;
+	}
+
+	return 0;
+}
+
 static void sde_kms_destroy(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms;
@@ -1373,6 +1437,103 @@
 		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
 }
 
+static int sde_kms_check_secure_transition(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_crtc *sec_crtc = NULL, *temp_crtc = NULL;
+	struct drm_crtc_state *crtc_state;
+	int secure_crtc_cnt = 0, active_crtc_cnt = 0;
+	int secure_global_crtc_cnt = 0, active_mode_crtc_cnt = 0;
+	int i;
+
+	if (!kms || !state) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	/* iterate state object for active and secure crtc */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (!crtc_state->active)
+			continue;
+		active_crtc_cnt++;
+		if (sde_crtc_get_secure_level(crtc, crtc_state) ==
+				SDE_DRM_SEC_ONLY) {
+			sec_crtc = crtc;
+			secure_crtc_cnt++;
+		}
+	}
+
+	/* bail out from further validation if there is no secure crtc */
+	if (!secure_crtc_cnt)
+		return 0;
+
+	if ((secure_crtc_cnt > MAX_ALLOWED_SECURE_CLIENT_CNT) ||
+		(secure_crtc_cnt &&
+		 (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE))) {
+		SDE_ERROR("Secure check failed active:%d, secure:%d\n",
+				active_crtc_cnt, secure_crtc_cnt);
+		return -EPERM;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	/* iterate global list for active and secure crtc */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+		if (!crtc->state->active)
+			continue;
+
+		active_mode_crtc_cnt++;
+
+		if (sde_crtc_get_secure_level(crtc, crtc->state) ==
+				SDE_DRM_SEC_ONLY) {
+			secure_global_crtc_cnt++;
+			temp_crtc = crtc;
+		}
+	}
+
+	/*
+	 * Flag an error if more than one CRTC is active, or if the secure
+	 * CRTC from the previous commit does not match the one in the
+	 * current commit.
+	 */
+	if (secure_crtc_cnt && ((active_mode_crtc_cnt > 1) ||
+			(secure_global_crtc_cnt && (temp_crtc != sec_crtc))))
+		SDE_ERROR("Secure check failed active:%d crtc_id:%d\n",
+				active_mode_crtc_cnt,
+				temp_crtc ? temp_crtc->base.id : -1);
+
+	return 0;
+}
+
+static int sde_kms_atomic_check(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !state)
+		return -EINVAL;
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+
+	ret = drm_atomic_helper_check(dev, state);
+	if (ret)
+		return ret;
+	/*
+	 * Check whether any secure transition (moving a CRTC between secure
+	 * and non-secure states and vice versa) is allowed. When moving to a
+	 * secure state, only planes with fb_mode set to dir-translated can be
+	 * staged on the CRTC, and only one CRTC can be active during the
+	 * secure state.
+	 */
+	return sde_kms_check_secure_transition(kms, state);
+}
+
 static struct msm_gem_address_space*
 _sde_kms_get_address_space(struct msm_kms *kms,
 		unsigned int domain)
@@ -1393,7 +1554,9 @@
 	if (domain >= MSM_SMMU_DOMAIN_MAX)
 		return NULL;
 
-	return sde_kms->aspace[domain];
+	return (sde_kms->aspace[domain] &&
+			sde_kms->aspace[domain]->domain_attached) ?
+		sde_kms->aspace[domain] : NULL;
 }
 
 static const struct msm_kms_funcs kms_funcs = {
@@ -1413,6 +1576,7 @@
 	.enable_vblank   = sde_kms_enable_vblank,
 	.disable_vblank  = sde_kms_disable_vblank,
 	.check_modified_format = sde_format_check_modified_format,
+	.atomic_check = sde_kms_atomic_check,
 	.get_format      = sde_get_msm_format,
 	.round_pixclk    = sde_kms_round_pixclk,
 	.destroy         = sde_kms_destroy,
@@ -1463,7 +1627,7 @@
 			continue;
 		}
 
-		aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
 			mmu, "sde");
 		if (IS_ERR(aspace)) {
 			ret = PTR_ERR(aspace);
@@ -1480,7 +1644,7 @@
 			msm_gem_address_space_destroy(aspace);
 			goto fail;
 		}
-
+		aspace->domain_attached = true;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index d818fdf..4c0699e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -93,6 +93,15 @@
 /* timeout in frames waiting for frame done */
 #define SDE_FRAME_DONE_TIMEOUT	60
 
+/* max active secure client counts allowed */
+#define MAX_ALLOWED_SECURE_CLIENT_CNT	1
+
+/* max active crtc when secure client is active */
+#define MAX_ALLOWED_CRTC_CNT_DURING_SECURE	1
+
+/* max virtual encoders per secure crtc */
+#define MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC	1
+
 /*
  * struct sde_irq_callback - IRQ callback handlers
  * @list: list to callback
@@ -500,4 +509,13 @@
  */
 void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo);
 
+/**
+ * smmu attach/detach functions
+ * @sde_kms: pointer to sde_kms structure
+ * @secure_only: if true, only secure contexts are attached/detached;
+ * otherwise all contexts are attached/detached.
+ */
+int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only);
+int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only);
+
 #endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index e6f712c..5a014bc7 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -258,16 +258,16 @@
 				((src_width + 32) * fmt->bpp);
 		} else {
 			/* non NV12 */
-			total_fl = (fixed_buff_size / 2 - hflip_bytes) /
-				((src_width + 32) * fmt->bpp * 2);
+			total_fl = (fixed_buff_size / 2 - hflip_bytes) * 2 /
+				((src_width + 32) * fmt->bpp);
 		}
 	} else {
 		if (pstate->multirect_mode == SDE_SSPP_MULTIRECT_PARALLEL) {
-			total_fl = (fixed_buff_size / 2 - hflip_bytes) /
-				((src_width + 32) * fmt->bpp * 2);
+			total_fl = (fixed_buff_size / 2 - hflip_bytes) * 2 /
+				((src_width + 32) * fmt->bpp);
 		} else {
-			total_fl = (fixed_buff_size - hflip_bytes) /
-				((src_width + 32) * fmt->bpp * 2);
+			total_fl = (fixed_buff_size - hflip_bytes) * 2 /
+				((src_width + 32) * fmt->bpp);
 		}
 	}
 
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 58448ca..58069f2 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -56,6 +56,8 @@
 #define MMSS_VBIF_TEST_BUS_OUT		0x230
 
 /* Vbif error info */
+#define MMSS_VBIF_PND_ERR		0x190
+#define MMSS_VBIF_SRC_ERR		0x194
 #define MMSS_VBIF_XIN_HALT_CTRL1	0x204
 #define MMSS_VBIF_ERR_INFO		0X1a0
 #define MMSS_VBIF_ERR_INFO_1		0x1a4
@@ -2373,7 +2375,7 @@
 	u32 **dump_mem = NULL;
 	u32 *dump_addr = NULL;
 	u32 value, d0, d1;
-	unsigned long reg;
+	unsigned long reg, reg1, reg2;
 	struct vbif_debug_bus_entry *head;
 	phys_addr_t phys = 0;
 	int i, list_size = 0;
@@ -2447,13 +2449,18 @@
 	wmb();
 
 	/**
-	 * Extract VBIF error info based on XIN halt status.
-	 * If the XIN client is not in HALT state, then retrieve the
-	 * VBIF error info for it.
+	 * Extract VBIF error info based on XIN halt and error status.
+	 * If the XIN client is not in HALT state, or an error is detected,
+	 * then retrieve the VBIF error info for it.
 	 */
 	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
-	dev_err(sde_dbg_base.dev, "XIN HALT:0x%lX\n", reg);
+	reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+	reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+	dev_err(sde_dbg_base.dev,
+			"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+			reg, reg1, reg2);
 	reg >>= 16;
+	reg &= ~(reg1 | reg2);
 	for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
 		if (!test_bit(0, &reg)) {
 			writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 78c325d..9cbffa5 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,7 +16,7 @@
 
 #define MAX_CLIENT_NAME_LEN 128
 
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	1600000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	0
 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
 #define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	1600000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 7865b14..aa8fa01 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -676,7 +676,7 @@
 		break;
 
 	case VSYNC_ENABLE:
-		reg = BIT(8) | ((mode & 0x7) < 10);
+		reg = BIT(8) | ((mode & 0x7) << 10);
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
 					reg, rsc->debug_mode);
 		break;
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index c8f2702e..159512c 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -211,6 +211,7 @@
 	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
 		spin_lock(&drvdata->spinlock);
 		stm_disable_hw(drvdata);
+		drvdata->enable = false;
 		spin_unlock(&drvdata->spinlock);
 
 		/* Wait until the engine has completely stopped */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 202d867..85fe87f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -527,11 +527,13 @@
 		goto out;
 	}
 
-	/* There is no point in reading a TMC in HW FIFO mode */
-	mode = readl_relaxed(drvdata->base + TMC_MODE);
-	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
-		ret = -EINVAL;
-		goto out;
+	if (drvdata->enable) {
+		/* There is no point in reading a TMC in HW FIFO mode */
+		mode = readl_relaxed(drvdata->base + TMC_MODE);
+		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+			ret = -EINVAL;
+			goto out;
+		}
 	}
 
 	val = local_read(&drvdata->mode);
@@ -571,11 +573,13 @@
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
-	/* There is no point in reading a TMC in HW FIFO mode */
-	mode = readl_relaxed(drvdata->base + TMC_MODE);
-	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
-		spin_unlock_irqrestore(&drvdata->spinlock, flags);
-		return -EINVAL;
+	if (drvdata->enable) {
+		/* There is no point in reading a TMC in HW FIFO mode */
+		mode = readl_relaxed(drvdata->base + TMC_MODE);
+		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			return -EINVAL;
+		}
 	}
 
 	/* Re-enable the TMC if need be */
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 2678a00..5bd52e4 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -158,6 +158,11 @@
 #define	FLASH_LED_DISABLE			0x00
 #define	FLASH_LED_SAFETY_TMR_DISABLED		0x13
 #define	FLASH_LED_MAX_TOTAL_CURRENT_MA		3750
+#define	FLASH_LED_IRES5P0_MAX_CURR_MA		640
+#define	FLASH_LED_IRES7P5_MAX_CURR_MA		960
+#define	FLASH_LED_IRES10P0_MAX_CURR_MA		1280
+#define	FLASH_LED_IRES12P5_MAX_CURR_MA		1600
+#define	MAX_IRES_LEVELS				4
 
 /* notifier call chain for flash-led irqs */
 static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
@@ -196,13 +201,15 @@
 	struct pinctrl_state		*hw_strobe_state_suspend;
 	int				hw_strobe_gpio;
 	int				ires_ua;
+	int				default_ires_ua;
 	int				max_current;
 	int				current_ma;
 	int				prev_current_ma;
 	u8				duration;
 	u8				id;
 	u8				type;
-	u8				ires;
+	u8				ires_idx;
+	u8				default_ires_idx;
 	u8				hdrm_val;
 	u8				current_reg_val;
 	u8				strobe_ctrl;
@@ -305,6 +312,11 @@
 	125, 119, 113, 107, 149, 143, 137, 131,
 };
 
+static int max_ires_curr_ma_table[MAX_IRES_LEVELS] = {
+	FLASH_LED_IRES12P5_MAX_CURR_MA, FLASH_LED_IRES10P0_MAX_CURR_MA,
+	FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA
+};
+
 static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
 {
 	int rc;
@@ -935,6 +947,7 @@
 
 static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
 {
+	int i = 0;
 	int prgm_current_ma = value;
 	int min_ma = fnode->ires_ua / 1000;
 	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
@@ -944,7 +957,22 @@
 	else if (value < min_ma)
 		prgm_current_ma = min_ma;
 
+	fnode->ires_idx = fnode->default_ires_idx;
+	fnode->ires_ua = fnode->default_ires_ua;
+
 	prgm_current_ma = min(prgm_current_ma, fnode->max_current);
+	if (prgm_current_ma > max_ires_curr_ma_table[fnode->ires_idx]) {
+		/* find the matching ires */
+		for (i = MAX_IRES_LEVELS - 1; i >= 0; i--) {
+			if (prgm_current_ma <= max_ires_curr_ma_table[i]) {
+				fnode->ires_idx = i;
+				fnode->ires_ua = FLASH_LED_IRES_MIN_UA +
+				      (FLASH_LED_IRES_BASE - fnode->ires_idx) *
+				      FLASH_LED_IRES_DIVISOR;
+				break;
+			}
+		}
+	}
 	fnode->current_ma = prgm_current_ma;
 	fnode->cdev.brightness = prgm_current_ma;
 	fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
@@ -1062,7 +1090,7 @@
 	val = 0;
 	for (i = 0; i < led->num_fnodes; i++)
 		if (snode->led_mask & BIT(led->fnode[i].id))
-			val |= led->fnode[i].ires << (led->fnode[i].id * 2);
+			val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2);
 
 	rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base),
 						FLASH_LED_CURRENT_MASK, val);
@@ -1434,13 +1462,14 @@
 		return rc;
 	}
 
-	fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
-	fnode->ires = FLASH_LED_IRES_DEFAULT_VAL;
+	fnode->default_ires_ua = fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
+	fnode->default_ires_idx = fnode->ires_idx = FLASH_LED_IRES_DEFAULT_VAL;
 	rc = of_property_read_u32(node, "qcom,ires-ua", &val);
 	if (!rc) {
-		fnode->ires_ua = val;
-		fnode->ires = FLASH_LED_IRES_BASE -
-			(val - FLASH_LED_IRES_MIN_UA) / FLASH_LED_IRES_DIVISOR;
+		fnode->default_ires_ua = fnode->ires_ua = val;
+		fnode->default_ires_idx = fnode->ires_idx =
+			FLASH_LED_IRES_BASE - (val - FLASH_LED_IRES_MIN_UA) /
+			FLASH_LED_IRES_DIVISOR;
 	} else if (rc != -EINVAL) {
 		pr_err("Unable to read current resolution rc=%d\n", rc);
 		return rc;
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 99bd263..800c9ea 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
index 048fe8f..03f6e0c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -32,7 +32,7 @@
 #define CAM_MAX_SW_CDM_VERSION_SUPPORTED  1
 #define CAM_SW_CDM_INDEX                  0
 #define CAM_CDM_INFLIGHT_WORKS            5
-#define CAM_CDM_HW_RESET_TIMEOUT          3000
+#define CAM_CDM_HW_RESET_TIMEOUT          300
 
 #define CAM_CDM_HW_ID_MASK      0xF
 #define CAM_CDM_HW_ID_SHIFT     0x5
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
index a63031b..c8b830f 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -243,7 +243,7 @@
 		*dst++ = *src++;
 	}
 
-	return pCmdBuffer;
+	return dst;
 }
 
 uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index fac8900..8f625ae 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include "cam_context.h"
+#include "cam_debug_util.h"
 
 static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
 	void *evt_data)
@@ -21,7 +22,7 @@
 	struct cam_context *ctx = (struct cam_context *)context;
 
 	if (!ctx || !ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
@@ -29,8 +30,9 @@
 		rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id,
 			evt_data);
 	else
-		pr_debug("%s: No function to handle event %d in dev %d, state %d\n",
-				__func__, evt_id, ctx->dev_hdl, ctx->state);
+		CAM_DBG(CAM_CORE,
+			"No function to handle event %d in dev %d, state %d",
+			evt_id, ctx->dev_hdl, ctx->state);
 	return rc;
 }
 
@@ -40,12 +42,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n'", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!info) {
-		pr_err("%s: Invalid get device info payload.\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid get device info payload");
 		return -EINVAL;
 	}
 
@@ -54,8 +56,8 @@
 		rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
 			ctx, info);
 	} else {
-		pr_err("%s: No get device info in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -69,12 +71,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!link) {
-		pr_err("%s: Invalid link payload.\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid link payload");
 		return -EINVAL;
 	}
 
@@ -82,7 +84,7 @@
 	if (ctx->state_machine[ctx->state].crm_ops.link) {
 		rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
 	} else {
-		pr_err("%s: No crm link in dev %d, state %d\n", __func__,
+		CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d",
 			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
@@ -97,12 +99,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready!\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!unlink) {
-		pr_err("%s: Invalid unlink payload.\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid unlink payload");
 		return -EINVAL;
 	}
 
@@ -111,8 +113,8 @@
 		rc = ctx->state_machine[ctx->state].crm_ops.unlink(
 			ctx, unlink);
 	} else {
-		pr_err("%s: No crm unlink in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No crm unlink in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -126,12 +128,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n'", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!apply) {
-		pr_err("%s: Invalid apply request payload.\n'", __func__);
+		CAM_ERR(CAM_CORE, "Invalid apply request payload");
 		return -EINVAL;
 	}
 
@@ -140,8 +142,8 @@
 		rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
 			apply);
 	} else {
-		pr_err("%s: No crm apply req in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -155,7 +157,7 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
@@ -164,8 +166,8 @@
 		rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
 			flush);
 	} else {
-		pr_err("%s: No crm flush req in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No crm flush req in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -179,13 +181,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid acquire device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid acquire device command payload");
 		return -EINVAL;
 	}
 
@@ -194,8 +195,8 @@
 		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
 			ctx, cmd);
 	} else {
-		pr_err("%s: No acquire device in dev %d, state %d\n",
-			__func__, cmd->dev_handle, ctx->state);
+		CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d",
+			cmd->dev_handle, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -209,13 +210,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid release device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid release device command payload");
 		return -EINVAL;
 	}
 
@@ -224,8 +224,8 @@
 		rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
 			ctx, cmd);
 	} else {
-		pr_err("%s: No release device in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No release device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -239,13 +239,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: context is not ready\n'", __func__);
+		CAM_ERR(CAM_CORE, "context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid config device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid config device command payload");
 		return -EINVAL;
 	}
 
@@ -254,8 +253,8 @@
 		rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
 			ctx, cmd);
 	} else {
-		pr_err("%s: No config device in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No config device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -269,13 +268,12 @@
 	int rc = 0;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid start device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid start device command payload");
 		return -EINVAL;
 	}
 
@@ -285,8 +283,8 @@
 			ctx, cmd);
 	else
 		/* start device can be optional for some driver */
-		pr_debug("%s: No start device in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_DBG(CAM_CORE, "No start device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 
 	mutex_unlock(&ctx->ctx_mutex);
 
@@ -299,13 +297,12 @@
 	int rc = 0;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n'", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid stop device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid stop device command payload");
 		return -EINVAL;
 	}
 
@@ -315,8 +312,8 @@
 			ctx, cmd);
 	else
 		/* stop device can be optional for some driver */
-		pr_warn("%s: No stop device in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_WARN(CAM_CORE, "No stop device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 	mutex_unlock(&ctx->ctx_mutex);
 
 	return rc;
@@ -332,7 +329,7 @@
 
 	/* crm_node_intf is optional */
 	if (!ctx || !hw_mgr_intf || !req_list) {
-		pr_err("%s: Invalid input parameters\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid input parameters");
 		return -EINVAL;
 	}
 
@@ -375,7 +372,7 @@
 	 * so we just free the memory for the context
 	 */
 	if (ctx->state != CAM_CTX_AVAILABLE)
-		pr_err("%s: Device did not shutdown cleanly.\n", __func__);
+		CAM_ERR(CAM_CORE, "Device did not shutdown cleanly");
 
 	memset(ctx, 0, sizeof(*ctx));
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index fa26ea0..043f44d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -16,7 +16,7 @@
 
 #include "cam_node.h"
 #include "cam_trace.h"
-
+#include "cam_debug_util.h"
 static void  __cam_node_handle_shutdown(struct cam_node *node)
 {
 	if (node->hw_mgr_intf.hw_close)
@@ -30,7 +30,7 @@
 	int rc = -EFAULT;
 
 	if (!query) {
-		pr_err("%s: Invalid params\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid params");
 		return -EINVAL;
 	}
 
@@ -65,7 +65,7 @@
 
 	rc = cam_context_handle_acquire_dev(ctx, acquire);
 	if (rc) {
-		pr_err("%s: Acquire device failed\n", __func__);
+		CAM_ERR(CAM_CORE, "Acquire device failed");
 		goto free_ctx;
 	}
 
@@ -87,19 +87,19 @@
 		return -EINVAL;
 
 	if (start->dev_handle <= 0) {
-		pr_err("Invalid device handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (start->session_handle <= 0) {
-		pr_err("Invalid session handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, start->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			start->dev_handle);
 		return -EINVAL;
 	}
 
@@ -115,19 +115,19 @@
 		return -EINVAL;
 
 	if (stop->dev_handle <= 0) {
-		pr_err("Invalid device handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (stop->session_handle <= 0) {
-		pr_err("Invalid session handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, stop->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			stop->dev_handle);
 		return -EINVAL;
 	}
 
@@ -143,19 +143,19 @@
 		return -EINVAL;
 
 	if (config->dev_handle <= 0) {
-		pr_err("Invalid device handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (config->session_handle <= 0) {
-		pr_err("Invalid session handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, config->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			config->dev_handle);
 		return -EINVAL;
 	}
 
@@ -172,29 +172,29 @@
 		return -EINVAL;
 
 	if (release->dev_handle <= 0) {
-		pr_err("Invalid device handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (release->session_handle <= 0) {
-		pr_err("Invalid session handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, release->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			release->dev_handle);
 		return -EINVAL;
 	}
 
 	rc = cam_context_handle_release_dev(ctx, release);
 	if (rc)
-		pr_err("%s: context release failed\n", __func__);
+		CAM_ERR(CAM_CORE, "context release failed");
 
 	rc = cam_destroy_device_hdl(release->dev_handle);
 	if (rc)
-		pr_err("%s: destroy device handle is failed\n", __func__);
+		CAM_ERR(CAM_CORE, "destroy device handle is failed");
 
 	mutex_lock(&node->list_mutex);
 	list_add_tail(&ctx->list, &node->free_ctx_list);
@@ -211,8 +211,8 @@
 
 	ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
 	if (!ctx) {
-		pr_err("%s: Can not get context  for handle %d\n",
-			__func__, info->dev_hdl);
+		CAM_ERR(CAM_CORE, "Can not get context  for handle %d",
+			info->dev_hdl);
 		return -EINVAL;
 	}
 	return cam_context_handle_crm_get_dev_info(ctx, info);
@@ -229,8 +229,8 @@
 
 	ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, setup->dev_hdl);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			setup->dev_hdl);
 		return -EINVAL;
 	}
 
@@ -251,8 +251,8 @@
 
 	ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, apply->dev_hdl);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			apply->dev_hdl);
 		return -EINVAL;
 	}
 
@@ -266,14 +266,14 @@
 	struct cam_context *ctx = NULL;
 
 	if (!flush) {
-		pr_err("%s: Invalid flush request payload\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid flush request payload");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, flush->dev_hdl);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_hdl);
 		return -EINVAL;
 	}
 
@@ -285,7 +285,7 @@
 	if (node)
 		memset(node, 0, sizeof(*node));
 
-	pr_debug("%s: deinit complete!\n", __func__);
+	CAM_DBG(CAM_CORE, "deinit complete");
 
 	return 0;
 }
@@ -317,8 +317,8 @@
 	node->ctx_size = ctx_size;
 	for (i = 0; i < ctx_size; i++) {
 		if (!ctx_list[i].state_machine) {
-			pr_err("%s: camera context %d is not initialized!",
-				__func__, i);
+			CAM_ERR(CAM_CORE,
+				"camera context %d is not initialized", i);
 			rc = -1;
 			goto err;
 		}
@@ -328,7 +328,7 @@
 
 	node->state = CAM_NODE_STATE_INIT;
 err:
-	pr_debug("%s: Exit. (rc = %d)\n", __func__, rc);
+	CAM_DBG(CAM_CORE, "Exit. (rc = %d)", rc);
 	return rc;
 }
 
@@ -339,7 +339,7 @@
 	if (!cmd)
 		return -EINVAL;
 
-	pr_debug("%s: handle cmd %d\n", __func__, cmd->op_code);
+	CAM_DBG(CAM_CORE, "handle cmd %d", cmd->op_code);
 
 	switch (cmd->op_code) {
 	case CAM_QUERY_CAP: {
@@ -353,8 +353,8 @@
 
 		rc = __cam_node_handle_query_cap(node, &query);
 		if (rc) {
-			pr_err("%s: querycap is failed(rc = %d)\n",
-				__func__,  rc);
+			CAM_ERR(CAM_CORE, "querycap is failed(rc = %d)",
+				rc);
 			break;
 		}
 
@@ -374,8 +374,8 @@
 		}
 		rc = __cam_node_handle_acquire_dev(node, &acquire);
 		if (rc) {
-			pr_err("%s: acquire device failed(rc = %d)\n",
-				__func__, rc);
+			CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)",
+				rc);
 			break;
 		}
 		if (copy_to_user((void __user *)cmd->handle, &acquire,
@@ -392,8 +392,8 @@
 		else {
 			rc = __cam_node_handle_start_dev(node, &start);
 			if (rc)
-				pr_err("%s: start device failed(rc = %d)\n",
-					__func__, rc);
+				CAM_ERR(CAM_CORE,
+					"start device failed(rc = %d)", rc);
 		}
 		break;
 	}
@@ -406,8 +406,8 @@
 		else {
 			rc = __cam_node_handle_stop_dev(node, &stop);
 			if (rc)
-				pr_err("%s: stop device failed(rc = %d)\n",
-					__func__, rc);
+				CAM_ERR(CAM_CORE,
+					"stop device failed(rc = %d)", rc);
 		}
 		break;
 	}
@@ -420,8 +420,8 @@
 		else {
 			rc = __cam_node_handle_config_dev(node, &config);
 			if (rc)
-				pr_err("%s: config device failed(rc = %d)\n",
-					__func__, rc);
+				CAM_ERR(CAM_CORE,
+					"config device failed(rc = %d)", rc);
 		}
 		break;
 	}
@@ -434,8 +434,8 @@
 		else {
 			rc = __cam_node_handle_release_dev(node, &release);
 			if (rc)
-				pr_err("%s: release device failed(rc = %d)\n",
-					__func__, rc);
+				CAM_ERR(CAM_CORE,
+					"release device failed(rc = %d)", rc);
 		}
 		break;
 	}
@@ -443,7 +443,7 @@
 		__cam_node_handle_shutdown(node);
 		break;
 	default:
-		pr_err("Unknown op code %d\n", cmd->op_code);
+		CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
 		rc = -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
index 8664ce8..d690508 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -12,6 +12,7 @@
 
 #include "cam_subdev.h"
 #include "cam_node.h"
+#include "cam_debug_util.h"
 
 /**
  * cam_subdev_subscribe_event()
@@ -63,7 +64,7 @@
 			(struct cam_control *) arg);
 		break;
 	default:
-		pr_err("Invalid command %d for %s!\n", cmd,
+		CAM_ERR(CAM_CORE, "Invalid command %d for %s", cmd,
 			node->name);
 		rc = -EINVAL;
 	}
@@ -80,7 +81,7 @@
 
 	if (copy_from_user(&cmd_data, (void __user *)arg,
 		sizeof(cmd_data))) {
-		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+		CAM_ERR(CAM_CORE, "Failed to copy from user_ptr=%pK size=%zu",
 			(void __user *)arg, sizeof(cmd_data));
 		return -EFAULT;
 	}
@@ -88,7 +89,8 @@
 	if (!rc) {
 		if (copy_to_user((void __user *)arg, &cmd_data,
 			sizeof(cmd_data))) {
-			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+			CAM_ERR(CAM_CORE,
+				"Failed to copy to user_ptr=%pK size=%zu",
 				(void __user *)arg, sizeof(cmd_data));
 			rc = -EFAULT;
 		}
@@ -147,8 +149,8 @@
 
 	rc = cam_register_subdev(sd);
 	if (rc) {
-		pr_err("%s: cam_register_subdev() failed for dev: %s!\n",
-			__func__, sd->name);
+		CAM_ERR(CAM_CORE, "cam_register_subdev() failed for dev: %s",
+			sd->name);
 		goto err;
 	}
 	platform_set_drvdata(sd->pdev, sd);
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/Makefile
new file mode 100644
index 0000000..4d272d3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_dev.o cam_jpeg_context.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
new file mode 100644
index 0000000..a299179
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -0,0 +1,138 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+#include "cam_jpeg_context.h"
+#include "cam_context_utils.h"
+#include "cam_debug_util.h"
+
+static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unable to Acquire device %d", rc);
+	else
+		ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_jpeg_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unable to release device %d", rc);
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_jpeg_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	return cam_context_prepare_dev_to_hw(ctx, cmd);
+}
+
+static int __cam_jpeg_ctx_handle_buf_done_in_acquired(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_jpeg_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = { },
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_jpeg_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_jpeg_ctx_release_dev_in_acquired,
+			.config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
+		},
+		.crm_ops = { },
+		.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
+	},
+};
+
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf)
+{
+	int rc;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		CAM_ERR(CAM_JPEG, "Invalid Context");
+		rc = -EFAULT;
+		goto err;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++)
+		ctx->req_base[i].req_priv = ctx;
+
+	rc = cam_context_init(ctx_base, NULL, hw_intf, ctx->req_base,
+		CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Camera Context Base init failed");
+		goto err;
+	}
+
+	ctx_base->state_machine = cam_jpeg_ctx_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+err:
+	return rc;
+}
+
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx)
+{
+	if (!ctx || !ctx->base) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h
new file mode 100644
index 0000000..90ac5cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_CONTEXT_H_
+#define _CAM_JPEG_CONTEXT_H_
+
+#include <uapi/media/cam_jpeg.h>
+
+#include "cam_context.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+
+#define CAM_JPEG_HW_EVENT_MAX 20
+
+/**
+ * struct cam_jpeg_context - Jpeg context
+ * @base: Base jpeg cam context object
+ * @req_base: Common request structure
+ */
+struct cam_jpeg_context {
+	struct cam_context *base;
+	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+};
+
+/* cam jpeg context irq handling function type */
+typedef int (*cam_jpeg_hw_event_cb_func)(
+	struct cam_jpeg_context *ctx_jpeg,
+	void *evt_data);
+
+/**
+ * struct cam_jpeg_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops: Array of handle function pointers.
+ *
+ */
+struct cam_jpeg_ctx_irq_ops {
+	cam_jpeg_hw_event_cb_func irq_ops[CAM_JPEG_HW_EVENT_MAX];
+};
+
+/**
+ * cam_jpeg_context_init()
+ *
+ * @brief: Initialization function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be initialized
+ * @ctx_base: Context base from cam_context
+ * @hw_intf: JPEG hw manager interface
+ *
+ */
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf);
+
+/**
+ * cam_jpeg_context_deinit()
+ *
+ * @brief: Deinitialize function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be deinitialized
+ *
+ */
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx);
+
+#endif  /* _CAM_JPEG_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
new file mode 100644
index 0000000..fb68ddb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
@@ -0,0 +1,136 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/kernel.h>
+
+#include "cam_node.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_dev.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_DEV_NAME "cam-jpeg"
+
+static struct cam_jpeg_dev g_jpeg_dev;
+
+static const struct of_device_id cam_jpeg_dt_match[] = {
+	{
+		.compatible = "qcom,cam-jpeg"
+	},
+	{ }
+};
+
+static int cam_jpeg_dev_remove(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "JPEG context %d deinit failed %d",
+				i, rc);
+	}
+
+	rc = cam_subdev_remove(&g_jpeg_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unregister failed %d", rc);
+
+	return rc;
+}
+
+static int cam_jpeg_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+
+	rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME,
+		CAM_JPEG_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG cam_subdev_probe failed %d", rc);
+		goto err;
+	}
+	node = (struct cam_node *)g_jpeg_dev.sd.token;
+
+	rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node,
+		(uint64_t *)&hw_mgr_intf);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Can not initialize JPEG HWmanager %d", rc);
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_jpeg_context_init(&g_jpeg_dev.ctx_jpeg[i],
+			&g_jpeg_dev.ctx[i],
+			&node->hw_mgr_intf);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "JPEG context init failed %d %d",
+				i, rc);
+			goto ctx_init_fail;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_jpeg_dev.ctx, CAM_CTX_MAX,
+		CAM_JPEG_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG node init failed %d", rc);
+		goto ctx_init_fail;
+	}
+
+	mutex_init(&g_jpeg_dev.jpeg_mutex);
+
+	CAM_INFO(CAM_JPEG, "Camera JPEG probe complete");
+
+	return rc;
+
+ctx_init_fail:
+	for (--i; i >= 0; i--)
+		if (cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]))
+			CAM_ERR(CAM_JPEG, "deinit fail %d %d", i, rc);
+unregister:
+	if (cam_subdev_remove(&g_jpeg_dev.sd))
+		CAM_ERR(CAM_JPEG, "remove fail %d", rc);
+err:
+	return rc;
+}
+
+static struct platform_driver jpeg_driver = {
+	.probe = cam_jpeg_dev_probe,
+	.remove = cam_jpeg_dev_remove,
+	.driver = {
+		.name = "cam_jpeg",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_dt_match,
+	},
+};
+
+static int __init cam_jpeg_dev_init_module(void)
+{
+	return platform_driver_register(&jpeg_driver);
+}
+
+static void __exit cam_jpeg_dev_exit_module(void)
+{
+	platform_driver_unregister(&jpeg_driver);
+}
+
+module_init(cam_jpeg_dev_init_module);
+module_exit(cam_jpeg_dev_exit_module);
+MODULE_DESCRIPTION("MSM JPEG driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
new file mode 100644
index 0000000..deab2d5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_DEV_H_
+#define _CAM_JPEG_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_jpeg_context.h"
+
+/**
+ * struct cam_jpeg_dev - Camera JPEG V4l2 device node
+ *
+ * @sd: Common camera subdevice node
+ * @node: Pointer to jpeg subdevice
+ * @ctx: JPEG base context storage
+ * @ctx_jpeg: JPEG private context storage
+ * @jpeg_mutex: Jpeg dev mutex
+ */
+struct cam_jpeg_dev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CAM_CTX_MAX];
+	struct cam_jpeg_context ctx_jpeg[CAM_CTX_MAX];
+	struct mutex jpeg_mutex;
+};
+#endif /* _CAM_JPEG_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile
new file mode 100644
index 0000000..08c9528
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
new file mode 100644
index 0000000..b06b5c4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -0,0 +1,1178 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr.h"
+#include "cam_enc_hw_intf.h"
+#include "cam_dma_hw_intf.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_HW_ENTRIES_MAX  20
+
+static struct cam_jpeg_hw_mgr g_jpeg_hw_mgr;
+
+static int32_t cam_jpeg_hw_mgr_cb(uint32_t irq_status,
+	int32_t result_size, void *data);
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data);
+
+static int cam_jpeg_mgr_process_irq(void *priv, void *data)
+{
+	int rc = 0;
+	struct cam_jpeg_process_irq_work_data_t *task_data;
+	struct cam_jpeg_hw_mgr *hw_mgr;
+	int32_t i;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_done_event_data buf_data;
+	struct cam_jpeg_set_irq_cb irq_cb;
+	uint32_t dev_type = 0;
+	uint64_t kaddr;
+	uint32_t *cmd_buf_kaddr;
+	size_t cmd_buf_len;
+	struct cam_jpeg_config_inout_param_info *p_params;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_frame_work_data_t *wq_task_data;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_JPEG, "Invalid data");
+		return -EINVAL;
+	}
+
+	task_data = data;
+	hw_mgr = &g_jpeg_hw_mgr;
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)task_data->data;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		return -EINVAL;
+	}
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+	irq_cb.data = NULL;
+	irq_cb.b_set_cb = false;
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+		CAM_ERR(CAM_JPEG, "process_cmd null ");
+		return -EINVAL;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "CMD_SET_IRQ_CB failed %d", rc);
+		return rc;
+	}
+
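+	/* Mark the device free and detach the completed config request. */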
+	mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	hw_mgr->device_in_use[dev_type][0] = false;
+	p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+	hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
+	mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+	task = cam_req_mgr_workq_get_task(
+		g_jpeg_hw_mgr.work_process_frame);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		return -EINVAL;
+	}
+
+	wq_task_data = (struct cam_jpeg_process_frame_work_data_t *)
+		task->payload;
+	if (!wq_task_data) {
+		CAM_ERR(CAM_JPEG, "wq_task_data is NULL");
+		return -EINVAL;
+	}
+	wq_task_data->data = (void *)(uint64_t)dev_type;
+	wq_task_data->request_id = 0;
+	wq_task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "could not enque task %d", rc);
+		return rc;
+	}
+
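+	/*
+	 * hw_update_entries[1] holds the in/out config params buffer;
+	 * map it and write back the encoded output size.
+	 */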
+	rc = cam_mem_get_cpu_buf(
+		p_cfg_req->hw_cfg_args.hw_update_entries[1].handle,
+		(uint64_t *)&kaddr, &cmd_buf_len);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to get info for cmd buf: %x %d",
+			hw_mgr->iommu_hdl, rc);
+		return rc;
+	}
+
+	cmd_buf_kaddr = (uint32_t *)kaddr;
+
+	cmd_buf_kaddr =
+		(cmd_buf_kaddr +
+		(p_cfg_req->hw_cfg_args.hw_update_entries[1].offset/4));
+
+	p_params = (struct cam_jpeg_config_inout_param_info *)cmd_buf_kaddr;
+
+	p_params->output_size = task_data->result_size;
+	CAM_DBG(CAM_JPEG, "Encoded Size %d", task_data->result_size);
+
+	buf_data.num_handles = p_cfg_req->
+		hw_cfg_args.num_out_map_entries;
+	for (i = 0; i < buf_data.num_handles; i++) {
+		buf_data.resource_handle[i] =
+			p_cfg_req->hw_cfg_args.
+			out_map_entries[i].resource_handle;
+	}
+	buf_data.request_id =
+		(uint64_t)p_cfg_req->hw_cfg_args.priv;
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+
+	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+
+
+	return rc;
+}
+
+static int cam_jpeg_hw_mgr_cb(
+	uint32_t irq_status, int32_t result_size, void *data)
+{
+	int32_t rc;
+	unsigned long flags;
+	struct cam_jpeg_hw_mgr *hw_mgr = &g_jpeg_hw_mgr;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_irq_work_data_t *task_data;
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(
+		g_jpeg_hw_mgr.work_process_irq_cb);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+		return -ENOMEM;
+	}
+
+	task_data = (struct cam_jpeg_process_irq_work_data_t *)task->payload;
+	task_data->data = data;
+	task_data->irq_status = irq_status;
+	task_data->result_size = result_size;
+	task_data->type = CAM_JPEG_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_irq;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_get_free_ctx(struct cam_jpeg_hw_mgr *hw_mgr)
+{
+	int i = 0;
+	int num_ctx = CAM_JPEG_CTX_MAX;
+
+	for (i = 0; i < num_ctx; i++) {
+		mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+		if (hw_mgr->ctx_data[i].in_use == false) {
+			hw_mgr->ctx_data[i].in_use = true;
+			mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+			break;
+		}
+		mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+	}
+
+	return i;
+}
+
+
+static int cam_jpeg_mgr_release_ctx(
+	struct cam_jpeg_hw_mgr *hw_mgr, int ctx_id)
+{
+	if (ctx_id >= CAM_JPEG_CTX_MAX) {
+		CAM_ERR(CAM_JPEG, "ctx_id is wrong: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	if (!hw_mgr->ctx_data[ctx_id].in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is already in use: %d", ctx_id);
+		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+		return -EINVAL;
+	}
+
+	hw_mgr->ctx_data[ctx_id].in_use = 0;
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
+	return 0;
+}
+
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
+{
+	int rc;
+	int i = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = priv;
+	struct cam_hw_update_entry *cmd;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_hw_config_args *config_args = NULL;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint64_t request_id = 0;
+	struct cam_jpeg_process_frame_work_data_t *task_data =
+		(struct cam_jpeg_process_frame_work_data_t *)data;
+	uint32_t dev_type;
+	struct cam_jpeg_set_irq_cb irq_cb;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	uint32_t size = 0;
+	uint32_t mem_cam_base = 0;
+	struct cam_hw_done_event_data buf_data;
+
+	CAM_DBG(CAM_JPEG, "in cam_jpeg_mgr_process_cmd");
+	if (!hw_mgr || !task_data) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+			hw_mgr, task_data);
+		return -EINVAL;
+	}
+
+	if (list_empty(&hw_mgr->hw_config_req_list)) {
+		CAM_DBG(CAM_JPEG, "no available request");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	p_cfg_req = list_first_entry(&hw_mgr->hw_config_req_list,
+		struct cam_jpeg_hw_cfg_req, list);
+	if (!p_cfg_req) {
+		CAM_ERR(CAM_JPEG, "no request");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (false == hw_mgr->device_in_use[p_cfg_req->dev_type][0]) {
+		hw_mgr->device_in_use[p_cfg_req->dev_type][0] = true;
+		hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = p_cfg_req;
+		list_del_init(&p_cfg_req->list);
+	} else {
+		CAM_ERR(CAM_JPEG, "NOT dequeuing, just return");
+		rc = -EFAULT;
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		goto end;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
+	request_id = task_data->request_id;
+	if (request_id != (uint64_t)config_args->priv) {
+		CAM_WARN(CAM_JPEG, "not a recent req %d %d",
+			request_id, (uint64_t)config_args->priv);
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_JPEG, "No hw update enteries are available");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	if (dev_type != p_cfg_req->dev_type)
+		CAM_WARN(CAM_JPEG, "dev types not same something wrong");
+
+	irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+	irq_cb.data = (void *)ctx_data;
+	irq_cb.b_set_cb = true;
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+		CAM_ERR(CAM_JPEG, "op process_cmd null ");
+		return -EINVAL;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "SET_IRQ_CB failed %d", rc);
+		return -EINVAL;
+	}
+
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.reset) {
+		CAM_ERR(CAM_JPEG, "op reset null ");
+		return -EINVAL;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.reset(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg hw reset failed %d", rc);
+		return -EINVAL;
+	}
+
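+	/*
+	 * Submit a CDM change-base BL first so register offsets in the
+	 * following command buffers resolve against this device's base.
+	 */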
+	mem_cam_base = (uint64_t)hw_mgr->cdm_reg_map[dev_type][0]->
+		mem_cam_base;
+	size = hw_mgr->cdm_info[dev_type][0].cdm_ops->
+		cdm_required_size_changebase();
+	hw_mgr->cdm_info[dev_type][0].cdm_ops->
+		cdm_write_changebase(ctx_data->cmd_chbase_buf_addr,
+		(uint64_t)hw_mgr->cdm_reg_map[dev_type][0]->mem_cam_base);
+	ctx_data->cdm_cmd_chbase->cmd_arrary_count = 1;
+	ctx_data->cdm_cmd_chbase->type = CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA;
+	ctx_data->cdm_cmd_chbase->flag = false;
+	ctx_data->cdm_cmd_chbase->userdata = NULL;
+	ctx_data->cdm_cmd_chbase->cookie = 0;
+	ctx_data->cdm_cmd_chbase->cmd[0].bl_addr.kernel_iova =
+		ctx_data->cmd_chbase_buf_addr;
+	ctx_data->cdm_cmd_chbase->cmd[0].offset = 0;
+	ctx_data->cdm_cmd_chbase->cmd[0].len = size;
+	rc = cam_cdm_submit_bls(hw_mgr->cdm_info[dev_type][0].cdm_handle,
+		ctx_data->cdm_cmd_chbase);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "failed cdm cmd %d", rc);
+
+	CAM_DBG(CAM_JPEG, "cfg e %pK num %d",
+		config_args->hw_update_entries,
+		config_args->num_hw_update_entries);
+
+	if (config_args->num_hw_update_entries > 0) {
+		cdm_cmd = ctx_data->cdm_cmd;
+		cdm_cmd->cmd_arrary_count =
+			config_args->num_hw_update_entries - 1;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		for (i = 0; i <= cdm_cmd->cmd_arrary_count; i++) {
+			cmd = (config_args->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle,
+			cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "Failed to apply the configs %d",
+				rc);
+			goto end_callcb;
+		}
+
+		if (!hw_mgr->devices[dev_type][0]->hw_ops.start) {
+			CAM_ERR(CAM_JPEG, "op start null ");
+			rc = -EINVAL;
+			goto end_callcb;
+		}
+		rc = hw_mgr->devices[dev_type][0]->hw_ops.start(
+			hw_mgr->devices[dev_type][0]->hw_priv,
+			NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "Failed to apply the configs %d",
+				rc);
+			goto end_callcb;
+		}
+	} else {
+		CAM_ERR(CAM_JPEG, "No commands to config");
+	}
+
+	return rc;
+
+end_callcb:
+	if (p_cfg_req) {
+		buf_data.num_handles = p_cfg_req->
+			hw_cfg_args.num_out_map_entries;
+		for (i = 0; i < buf_data.num_handles; i++) {
+			buf_data.resource_handle[i] =
+				p_cfg_req->hw_cfg_args.
+				out_map_entries[i].resource_handle;
+		}
+		buf_data.request_id =
+			(uint64_t)p_cfg_req->hw_cfg_args.priv;
+		ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+	}
+end:
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+	int rc;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *config_args = config_hw_args;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint64_t request_id = 0;
+	struct cam_hw_update_entry *hw_update_entries;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_frame_work_data_t *task_data;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+
+	if (!hw_mgr || !config_args) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_JPEG, "No hw update enteries are available");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	if (list_empty(&hw_mgr->free_req_list)) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_JPEG, "list empty");
+		return -ENOMEM;
+	}
+
+	p_cfg_req = list_first_entry(&hw_mgr->free_req_list,
+		struct cam_jpeg_hw_cfg_req, list);
+	list_del_init(&p_cfg_req->list);
+
+	/* Update Currently Processing Config Request */
+	p_cfg_req->hw_cfg_args = *config_args;
+	p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	request_id = (uint64_t)config_args->priv;
+	hw_update_entries = config_args->hw_update_entries;
+	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %d %pK",
+		ctx_data, request_id, config_args->priv);
+	task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		rc = -ENOMEM;
+		goto err_after_dq_free_list;
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	task_data = (struct cam_jpeg_process_frame_work_data_t *)
+		task->payload;
+	if (!task_data) {
+		CAM_ERR(CAM_JPEG, "task_data is NULL");
+		rc = -EINVAL;
+		goto err_after_dq_free_list;
+	}
+	CAM_DBG(CAM_JPEG, "cfge %pK num %d",
+		p_cfg_req->hw_cfg_args.hw_update_entries,
+		p_cfg_req->hw_cfg_args.num_hw_update_entries);
+
+	list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list);
+
+	task_data->data = (void *)(int64_t)p_cfg_req->dev_type;
+	task_data->request_id = request_id;
+	task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_cmd;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to enqueue task %d", rc);
+		goto err_after_get_task;
+	}
+
+	return rc;
+
+err_after_get_task:
+	list_del_init(&p_cfg_req->list);
+err_after_dq_free_list:
+	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+
+	return rc;
+}
+
+
+static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc, i, j, k;
+	struct cam_hw_prepare_update_args *prepare_args =
+		prepare_hw_update_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+	if (!prepare_args || !hw_mgr) {
+		CAM_ERR(CAM_JPEG, "Invalid args %pK %pK",
+			prepare_args, hw_mgr);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)prepare_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	packet = prepare_args->packet;
+	if (!packet) {
+		CAM_ERR(CAM_JPEG, "received packet is NULL");
+		return -EINVAL;
+	}
+
+	if (((packet->header.op_code & 0xff) != CAM_JPEG_OPCODE_ENC_UPDATE) &&
+		((packet->header.op_code
+		& 0xff) != CAM_JPEG_OPCODE_DMA_UPDATE)) {
+		CAM_ERR(CAM_JPEG, "Invalid Opcode in pkt: %d",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+	if ((packet->num_cmd_buf > 2) || !packet->num_patches ||
+		!packet->num_io_configs) {
+		CAM_ERR(CAM_JPEG, "wrong number of cmd/patch info: %u %u",
+			packet->num_cmd_buf,
+			packet->num_patches);
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&packet->payload +
+		(packet->cmd_buf_offset / 4));
+	CAM_DBG(CAM_JPEG, "packet = %pK cmd_desc = %pK size = %lu",
+		(void *)packet, (void *)cmd_desc,
+		sizeof(struct cam_cmd_buf_desc));
+
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc);
+		return rc;
+	}
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+	CAM_DBG(CAM_JPEG, "packet = %pK io_cfg_ptr = %pK size = %lu",
+		(void *)packet, (void *)io_cfg_ptr,
+		sizeof(struct cam_buf_io_cfg));
+
+	prepare_args->num_out_map_entries = 0;
+
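+	/* Inputs become wait fences, outputs become signal fences. */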
+	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+			prepare_args->in_map_entries[j].resource_handle =
+				io_cfg_ptr[i].resource_type;
+			prepare_args->in_map_entries[j++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_in_map_entries++;
+		} else {
+			prepare_args->out_map_entries[k].resource_handle =
+				io_cfg_ptr[i].resource_type;
+			prepare_args->out_map_entries[k++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_out_map_entries++;
+		}
+		CAM_DBG(CAM_JPEG, "dir[%d]: %u, fence: %u",
+			i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
+	}
+
+	for (i = 0; i < packet->num_cmd_buf;  i++) {
+		prepare_args->hw_update_entries[i].len =
+			(uint32_t)cmd_desc[i].length;
+		prepare_args->hw_update_entries[i].handle =
+			(uint32_t)cmd_desc[i].mem_handle;
+		prepare_args->hw_update_entries[i].offset =
+			(uint32_t)cmd_desc[i].offset;
+		prepare_args->num_hw_update_entries++;
+	}
+
+	prepare_args->priv = (void *)packet->header.request_id;
+
+	CAM_DBG(CAM_JPEG, "will wait on input sync sync_id %d",
+		prepare_args->in_map_entries[0].sync_id);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
+{
+	int rc;
+	int ctx_id = 0;
+	struct cam_hw_release_args *release_hw = release_hw_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint32_t dev_type;
+
+	if (!release_hw || !hw_mgr) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)release_hw->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		return -EINVAL;
+	}
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	hw_mgr->cdm_info[dev_type][0].ref_cnt--;
+	if (!(hw_mgr->cdm_info[dev_type][0].ref_cnt)) {
+		if (cam_cdm_stream_off(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+			CAM_ERR(CAM_JPEG, "CDM stream off failed %d",
+				hw_mgr->cdm_info[dev_type][0].cdm_handle);
+		}
+		/* release cdm handle */
+		cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+	}
+
+	if (g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.deinit) {
+		rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.deinit(
+			g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv, NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Failed to Init %d HW", dev_type);
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	rc = cam_jpeg_mgr_release_ctx(hw_mgr, ctx_id);
+	if (rc)
+		return -EINVAL;
+
+	CAM_DBG(CAM_JPEG, "handle %llu", ctx_data);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	int rc;
+	int32_t ctx_id = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_acquire_args *args = acquire_hw_args;
+	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+	struct cam_cdm_acquire_data cdm_acquire;
+	uint32_t dev_type;
+	uint32_t size = 0;
+
+	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK", hw_mgr_priv,
+			acquire_hw_args);
+		return -EINVAL;
+	}
+
+	if (args->num_acq > 1) {
+		CAM_ERR(CAM_JPEG,
+			"number of resources are wrong: %u",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&jpeg_dev_acquire_info,
+			(void __user *)args->acquire_info,
+			sizeof(jpeg_dev_acquire_info))) {
+		CAM_ERR(CAM_JPEG, "copy failed");
+		return -EFAULT;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_jpeg_mgr_get_free_ctx(hw_mgr);
+	if (ctx_id >= CAM_JPEG_CTX_MAX) {
+		CAM_ERR(CAM_JPEG, "No free ctx space in hw_mgr");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EFAULT;
+	}
+
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+
+	ctx_data->cdm_cmd =
+		kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((CAM_JPEG_HW_ENTRIES_MAX - 1) *
+			sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!ctx_data->cdm_cmd) {
+		rc = -ENOMEM;
+		goto acq_cdm_hdl_failed;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->jpeg_dev_acquire_info = jpeg_dev_acquire_info;
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
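+	/*
+	 * The CDM handle is shared per device type and ref counted, so it
+	 * is acquired only for the first context of this type.
+	 */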
+	if (!hw_mgr->cdm_info[dev_type][0].ref_cnt) {
+
+		if (dev_type == CAM_JPEG_RES_TYPE_ENC) {
+			memcpy(cdm_acquire.identifier,
+				"jpegenc", sizeof("jpegenc"));
+		} else {
+			memcpy(cdm_acquire.identifier,
+				"jpegdma", sizeof("jpegdma"));
+		}
+		cdm_acquire.cell_index = 0;
+		cdm_acquire.handle = 0;
+		cdm_acquire.userdata = ctx_data;
+		if (hw_mgr->cdm_reg_map[dev_type][0]) {
+			cdm_acquire.base_array[0] =
+				hw_mgr->cdm_reg_map[dev_type][0];
+		}
+		cdm_acquire.base_array_cnt = 1;
+		cdm_acquire.id = CAM_CDM_VIRTUAL;
+		cdm_acquire.cam_cdm_callback = NULL;
+
+		rc = cam_cdm_acquire(&cdm_acquire);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "Failed to acquire the CDM HW %d",
+				rc);
+			rc = -EFAULT;
+			goto acq_cdm_hdl_failed;
+		}
+		hw_mgr->cdm_info[dev_type][0].cdm_handle = cdm_acquire.handle;
+		hw_mgr->cdm_info[dev_type][0].cdm_ops = cdm_acquire.ops;
+		hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+	} else {
+		hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+	}
+
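+	/* Allocate the CDM change-base command structures for this context */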
+	ctx_data->cdm_cmd_chbase =
+		kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			(2 * sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!ctx_data->cdm_cmd_chbase) {
+		rc = -ENOMEM;
+		goto start_cdm_hdl_failed;
+	}
+	size = hw_mgr->cdm_info[dev_type][0].
+		cdm_ops->cdm_required_size_changebase();
+	ctx_data->cmd_chbase_buf_addr = kzalloc(size * 4, GFP_KERNEL);
+	if (!ctx_data->cmd_chbase_buf_addr) {
+		rc = -ENOMEM;
+		goto start_cdm_hdl_failed;
+	}
+
+	if (!g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.init) {
+		CAM_ERR(CAM_JPEG, "hw op init null");
+		rc = -EINVAL;
+		goto start_cdm_hdl_failed;
+	}
+	rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.init(
+		g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv,
+		ctx_data,
+		sizeof(ctx_data));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to Init %d HW", dev_type);
+		goto start_cdm_hdl_failed;
+	}
+
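+	/* Stream on the CDM only for the first reference of this device type */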
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+		if (cam_cdm_stream_on(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+			CAM_ERR(CAM_JPEG, "Can not start cdm (%d)!",
+				hw_mgr->cdm_info[dev_type][0].cdm_handle);
+			rc = -EFAULT;
+			goto start_cdm_hdl_failed;
+		}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->context_priv = args->context_data;
+
+	args->ctxt_to_hw_map = (void *)&(hw_mgr->ctx_data[ctx_id]);
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+
+
+	if (copy_to_user((void __user *)args->acquire_info,
+		&jpeg_dev_acquire_info,
+		sizeof(jpeg_dev_acquire_info))) {
+		rc = -EFAULT;
+		goto copy_to_user_failed;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_JPEG, "success ctx_data= %pK", ctx_data);
+
+	return rc;
+
+copy_to_user_failed:
+	cam_cdm_stream_off(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+start_cdm_hdl_failed:
+	cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+acq_cdm_hdl_failed:
+	kfree(ctx_data->cdm_cmd);
+	cam_jpeg_mgr_release_ctx(hw_mgr, ctx_id);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+	int rc;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+	if (!hw_mgr_priv || !hw_caps_args) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK",
+			hw_mgr_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	if (copy_to_user((void __user *)query_cap->caps_handle,
+		&g_jpeg_hw_mgr.jpeg_caps,
+		sizeof(struct cam_jpeg_query_cap_cmd))) {
+		CAM_ERR(CAM_JPEG, "copy_to_user failed");
+		rc = -EFAULT;
+		goto copy_error;
+	}
+	CAM_DBG(CAM_JPEG, "cam_jpeg_mgr_get_hw_caps success");
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return 0;
+
+copy_error:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_jpeg_setup_workqs(void)
+{
+	int rc, i;
+
+	rc = cam_req_mgr_workq_create(
+		"jpeg_command_queue",
+		CAM_JPEG_WORKQ_NUM_TASK,
+		&g_jpeg_hw_mgr.work_process_frame,
+		CRM_WORKQ_USAGE_NON_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+		goto work_process_frame_failed;
+	}
+
+	rc = cam_req_mgr_workq_create(
+		"jpeg_message_queue",
+		CAM_JPEG_WORKQ_NUM_TASK,
+		&g_jpeg_hw_mgr.work_process_irq_cb,
+		CRM_WORKQ_USAGE_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+		goto work_process_irq_cb_failed;
+	}
+
+	g_jpeg_hw_mgr.process_frame_work_data =
+		(struct cam_jpeg_process_frame_work_data_t *)
+		kzalloc(sizeof(struct cam_jpeg_process_frame_work_data_t) *
+			CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.process_frame_work_data) {
+		rc = -ENOMEM;
+		goto work_process_frame_data_failed;
+	}
+
+	g_jpeg_hw_mgr.process_irq_cb_work_data =
+		(struct cam_jpeg_process_irq_work_data_t *)
+		kzalloc(sizeof(struct cam_jpeg_process_irq_work_data_t) *
+			CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.process_irq_cb_work_data) {
+		rc = -ENOMEM;
+		goto work_process_irq_cb_data_failed;
+	}
+
+	for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+		g_jpeg_hw_mgr.work_process_irq_cb->task.pool[i].payload =
+			&g_jpeg_hw_mgr.process_irq_cb_work_data[i];
+
+	for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+		g_jpeg_hw_mgr.work_process_frame->task.pool[i].payload =
+			&g_jpeg_hw_mgr.process_frame_work_data[i];
+
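+	/* Seed the free list with the static pool of hw config request nodes */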
+	INIT_LIST_HEAD(&g_jpeg_hw_mgr.hw_config_req_list);
+	INIT_LIST_HEAD(&g_jpeg_hw_mgr.free_req_list);
+	for (i = 0; i < CAM_JPEG_HW_CFG_Q_MAX; i++) {
+		INIT_LIST_HEAD(&(g_jpeg_hw_mgr.req_list[i].list));
+		list_add_tail(&(g_jpeg_hw_mgr.req_list[i].list),
+			&(g_jpeg_hw_mgr.free_req_list));
+	}
+
+	return rc;
+
+work_process_irq_cb_data_failed:
+	kfree(g_jpeg_hw_mgr.process_frame_work_data);
+work_process_frame_data_failed:
+	cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_irq_cb);
+work_process_irq_cb_failed:
+	cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_frame);
+work_process_frame_failed:
+
+	return rc;
+}
+
+static int cam_jpeg_init_devices(struct device_node *of_node,
+	uint32_t *p_num_enc_dev,
+	uint32_t *p_num_dma_dev)
+{
+	int count, i, rc;
+	uint32_t num_dev;
+	uint32_t num_dma_dev;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+	struct cam_hw_info *enc_hw = NULL;
+	struct cam_hw_info *dma_hw = NULL;
+	struct cam_hw_soc_info *enc_soc_info = NULL;
+	struct cam_hw_soc_info *dma_soc_info = NULL;
+
+	if (!p_num_enc_dev || !p_num_dma_dev) {
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		CAM_ERR(CAM_JPEG,
+			"no compat hw found in dev tree, count = %d",
+			count);
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-jpeg-enc", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "read num enc devices failed %d", rc);
+		goto num_enc_failed;
+	}
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]) {
+		rc = -ENOMEM;
+		CAM_ERR(CAM_JPEG, "allocating enc device table failed");
+		goto num_enc_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-jpeg-dma", &num_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "get num dma dev nodes failed %d", rc);
+		goto num_dma_failed;
+	}
+
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_dma_dev, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]) {
+		rc = -ENOMEM;
+		goto num_dma_failed;
+	}
+
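+	/*
+	 * Resolve each compat-hw-name entry to its platform device and cache
+	 * the hw interface indexed by device type and instance.
+	 */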
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_JPEG,
+				"error! Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_JPEG, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_JPEG, "no child device");
+			of_node_put(child_node);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+		CAM_DBG(CAM_JPEG, "child_intf %pK type %d id %d",
+			child_dev_intf,
+			child_dev_intf->hw_type,
+			child_dev_intf->hw_idx);
+
+		if ((child_dev_intf->hw_type == CAM_JPEG_DEV_ENC &&
+			child_dev_intf->hw_idx >= num_dev) ||
+			(child_dev_intf->hw_type == CAM_JPEG_DEV_DMA &&
+			child_dev_intf->hw_idx >= num_dma_dev)) {
+			CAM_ERR(CAM_JPEG, "index out of range");
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+		g_jpeg_hw_mgr.devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		of_node_put(child_node);
+	}
+
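+	/* Cache the first enc/dma register maps for CDM change-base setup */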
+	enc_hw = (struct cam_hw_info *)
+		g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0]->hw_priv;
+	enc_soc_info = &enc_hw->soc_info;
+	g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_ENC][0] =
+		&enc_soc_info->reg_map[0];
+	dma_hw = (struct cam_hw_info *)
+		g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]->hw_priv;
+	dma_soc_info = &dma_hw->soc_info;
+	g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_DMA][0] =
+		&dma_soc_info->reg_map[0];
+
+	*p_num_enc_dev = num_dev;
+	*p_num_dma_dev = num_dma_dev;
+
+	return rc;
+
+compat_hw_name_failed:
+	kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]);
+num_dma_failed:
+	kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]);
+num_enc_failed:
+num_dev_failed:
+
+	return rc;
+}
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
+{
+	int i, rc;
+	uint32_t num_dev;
+	uint32_t num_dma_dev;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_iommu_handle cdm_handles;
+
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_JPEG, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	memset(hw_mgr_hdl, 0x0, sizeof(struct cam_hw_mgr_intf));
+	hw_mgr_intf->hw_mgr_priv = &g_jpeg_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_jpeg_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_jpeg_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
+
+	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);
+
+	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
+		mutex_init(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);
+
+	rc = cam_jpeg_init_devices(of_node, &num_dev, &num_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg init devices %d", rc);
+		goto smmu_get_failed;
+	}
+
+	rc = cam_smmu_get_handle("jpeg", &g_jpeg_hw_mgr.iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg get iommu handle failed %d", rc);
+		goto smmu_get_failed;
+	}
+
+	CAM_DBG(CAM_JPEG, "mmu handle :%d", g_jpeg_hw_mgr.iommu_hdl);
+	rc = cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg attach failed: %d", rc);
+		goto jpeg_attach_failed;
+	}
+
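+	/* Fetch the iommu handles registered by the jpegenc CDM */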
+	rc = cam_cdm_get_iommu_handle("jpegenc", &cdm_handles);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "acquire cdm iommu handle failed %d", rc);
+		g_jpeg_hw_mgr.cdm_iommu_hdl = -1;
+		g_jpeg_hw_mgr.cdm_iommu_hdl_secure = -1;
+		goto cdm_iommu_failed;
+	}
+	g_jpeg_hw_mgr.cdm_iommu_hdl = cdm_handles.non_secure;
+	g_jpeg_hw_mgr.cdm_iommu_hdl_secure = cdm_handles.secure;
+
+	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.non_secure =
+		g_jpeg_hw_mgr.iommu_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.secure =
+		g_jpeg_hw_mgr.iommu_sec_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.non_secure =
+		g_jpeg_hw_mgr.cdm_iommu_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.secure =
+		g_jpeg_hw_mgr.cdm_iommu_hdl_secure;
+	g_jpeg_hw_mgr.jpeg_caps.num_enc = num_dev;
+	g_jpeg_hw_mgr.jpeg_caps.num_dma = num_dma_dev;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.major = 4;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.minor = 2;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.incr  = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.reserved = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.major = 4;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.minor = 2;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.incr  = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.reserved = 0;
+
+	rc = cam_jpeg_setup_workqs();
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "setup work queues failed %d", rc);
+		goto cdm_iommu_failed;
+	}
+
+	return rc;
+
+cdm_iommu_failed:
+	cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
+	cam_smmu_destroy_handle(g_jpeg_hw_mgr.iommu_hdl);
+jpeg_attach_failed:
+	g_jpeg_hw_mgr.iommu_hdl = 0;
+smmu_get_failed:
+	mutex_destroy(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
+		mutex_destroy(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
new file mode 100644
index 0000000..9e3418d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_H
+#define CAM_JPEG_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_jpeg_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+
+#define CAM_JPEG_WORKQ_NUM_TASK      30
+#define CAM_JPEG_WORKQ_TASK_CMD_TYPE 1
+#define CAM_JPEG_WORKQ_TASK_MSG_TYPE 2
+#define CAM_JPEG_HW_CFG_Q_MAX        50
+
+/**
+ * struct cam_jpeg_process_frame_work_data_t
+ *
+ * @type: Task type
+ * @data: Pointer to command data
+ * @request_id: Request id
+ */
+struct cam_jpeg_process_frame_work_data_t {
+	uint32_t type;
+	void *data;
+	uint64_t request_id;
+};
+
+/**
+ * struct cam_jpeg_process_irq_work_data_t
+ *
+ * @type: Task type
+ * @data: Pointer to message data
+ * @result_size: Result size of enc/dma
+ * @irq_status: IRQ status
+ */
+struct cam_jpeg_process_irq_work_data_t {
+	uint32_t type;
+	void *data;
+	int32_t result_size;
+	uint32_t irq_status;
+};
+
+/**
+ * struct cam_jpeg_hw_cdm_info_t
+ *
+ * @ref_cnt: Ref count of how many times device type is acquired
+ * @cdm_handle: Cdm handle
+ * @cdm_ops: Cdm ops struct
+ */
+struct cam_jpeg_hw_cdm_info_t {
+	int ref_cnt;
+	uint32_t cdm_handle;
+	struct cam_cdm_utils_ops *cdm_ops;
+};
+
+/**
+ * struct cam_jpeg_hw_cfg_req_t
+ *
+ * @list_head: List head
+ * @hw_cfg_args: Hw config args
+ * @dev_type: Dev type for cfg request
+ */
+struct cam_jpeg_hw_cfg_req {
+	struct list_head list;
+	struct cam_hw_config_args hw_cfg_args;
+	uint32_t dev_type;
+};
+
+/**
+ * struct cam_jpeg_hw_ctx_data
+ *
+ * @context_priv: Context private data, cam_context from
+ *     acquire.
+ * @ctx_mutex: Mutex for context
+ * @jpeg_dev_acquire_info: Acquire device info
+ * @ctxt_event_cb: Context callback function
+ * @in_use: Flag for context usage
+ * @wait_complete: Completion info
+ * @cdm_cmd: Cdm cmd submitted for that context.
+ * @cdm_cmd_chbase: Change base cdm command from context
+ * @cmd_chbase_buf_addr: Change base cmd buf address
+ */
+struct cam_jpeg_hw_ctx_data {
+	void *context_priv;
+	struct mutex ctx_mutex;
+	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+	cam_hw_event_cb_func ctxt_event_cb;
+	bool in_use;
+	struct completion wait_complete;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_cdm_bl_request *cdm_cmd_chbase;
+	uint32_t *cmd_chbase_buf_addr;
+};
+
+/**
+ * struct cam_jpeg_hw_mgr
+ * @hw_mgr_mutex: Mutex for JPEG hardware manager
+ * @hw_mgr_lock: Spinlock for JPEG hardware manager
+ * @ctx_data: Context data
+ * @jpeg_caps: JPEG capabilities
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @work_process_frame: Work queue for hw config requests
+ * @work_process_irq_cb: Work queue for processing IRQs.
+ * @process_frame_work_data: Work data pool for hw config
+ *     requests
+ * @process_irq_cb_work_data: Work data pool for irq requests
+ * @cdm_iommu_hdl: Iommu handle received from cdm
+ * @cdm_iommu_hdl_secure: Secure iommu handle received from cdm
+ * @devices: Core hw Devices of JPEG hardware manager
+ * @cdm_info: Cdm info for each core device.
+ * @cdm_reg_map: Regmap of each device for cdm.
+ * @device_in_use: Flag device being used for an active request
+ * @dev_hw_cfg_args: Current cfg request per core dev
+ * @hw_config_req_list: Pending hw update requests list
+ * @free_req_list: Free nodes for above list
+ * @req_list: Nodes of hw update list
+ */
+struct cam_jpeg_hw_mgr {
+	struct mutex hw_mgr_mutex;
+	spinlock_t hw_mgr_lock;
+	struct cam_jpeg_hw_ctx_data ctx_data[CAM_JPEG_CTX_MAX];
+	struct cam_jpeg_query_cap_cmd jpeg_caps;
+	int32_t iommu_hdl;
+	int32_t iommu_sec_hdl;
+	struct cam_req_mgr_core_workq *work_process_frame;
+	struct cam_req_mgr_core_workq *work_process_irq_cb;
+	struct cam_jpeg_process_frame_work_data_t *process_frame_work_data;
+	struct cam_jpeg_process_irq_work_data_t *process_irq_cb_work_data;
+	int cdm_iommu_hdl;
+	int cdm_iommu_hdl_secure;
+
+	struct cam_hw_intf **devices[CAM_JPEG_DEV_TYPE_MAX];
+	struct cam_jpeg_hw_cdm_info_t cdm_info[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	struct cam_soc_reg_map *cdm_reg_map[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	uint32_t device_in_use[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	struct cam_jpeg_hw_cfg_req *dev_hw_cfg_args[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+
+	struct list_head hw_config_req_list;
+	struct list_head free_req_list;
+	struct cam_jpeg_hw_cfg_req req_list[CAM_JPEG_HW_CFG_Q_MAX];
+};
+
+#endif /* CAM_JPEG_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h
new file mode 100644
index 0000000..71b21b9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_DMA_HW_INTF_H
+#define CAM_JPEG_DMA_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_intf.h"
+
+enum cam_jpeg_dma_cmd_type {
+	CAM_JPEG_DMA_CMD_CDM_CFG,
+	CAM_JPEG_DMA_CMD_SET_IRQ_CB,
+	CAM_JPEG_DMA_CMD_MAX,
+};
+
+#endif /* CAM_JPEG_DMA_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h
new file mode 100644
index 0000000..f0b4e00
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_ENC_HW_INTF_H
+#define CAM_JPEG_ENC_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_intf.h"
+
+enum cam_jpeg_enc_cmd_type {
+	CAM_JPEG_ENC_CMD_CDM_CFG,
+	CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+	CAM_JPEG_ENC_CMD_MAX,
+};
+
+#endif /* CAM_JPEG_ENC_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
new file mode 100644
index 0000000..3204388
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_INTF_H
+#define CAM_JPEG_HW_INTF_H
+
+#define CAM_JPEG_CTX_MAX              8
+#define CAM_JPEG_DEV_PER_TYPE_MAX     1
+
+#define CAM_JPEG_CMD_BUF_MAX_SIZE     128
+#define CAM_JPEG_MSG_BUF_MAX_SIZE     CAM_JPEG_CMD_BUF_MAX_SIZE
+
+enum cam_jpeg_hw_type {
+	CAM_JPEG_DEV_ENC,
+	CAM_JPEG_DEV_DMA,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
new file mode 100644
index 0000000..d5c8c9d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_INTF_H
+#define CAM_JPEG_HW_MGR_INTF_H
+
+#include <uapi/media/cam_jpeg.h>
+#include <uapi/media/cam_defs.h>
+#include <linux/of.h>
+
+#include "cam_cpas_api.h"
+
+#define JPEG_TURBO_VOTE           640000000
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node,
+	uint64_t *hw_mgr_hdl);
+
+/**
+ * struct cam_jpeg_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote data
+ * @axi_vote_valid: Flag for axi vote data
+ */
+struct cam_jpeg_cpas_vote {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
+
+struct cam_jpeg_set_irq_cb {
+	int32_t (*jpeg_hw_mgr_cb)(
+		uint32_t irq_status,
+		int32_t result_size,
+		void *data);
+	void *data;
+	uint32_t b_set_cb;
+};
+
+#endif /* CAM_JPEG_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
new file mode 100644
index 0000000..23b27bf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_dev.o jpeg_dma_core.o jpeg_dma_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
new file mode 100644
index 0000000..05c1a95
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
@@ -0,0 +1,165 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_dma_hw_intf.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	struct cam_jpeg_cpas_vote cpas_vote;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_dma_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+		core_info;
+
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.compressed_bw = JPEG_TURBO_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = JPEG_TURBO_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas start failed: %d", rc);
+
+	rc = cam_jpeg_dma_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "soc enable failed %d", rc);
+		cam_cpas_stop(core_info->cpas_handle);
+	}
+
+	return rc;
+}
+
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_dma_dev->soc_info;
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_jpeg_dma_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
+
+	return 0;
+}
+
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_JPEG_DMA_CMD_MAX) {
+		CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	core_info =
+		(struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+		core_info;
+
+	switch (cmd_type) {
+	case CAM_JPEG_DMA_CMD_SET_IRQ_CB:
+	{
+		struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_JPEG, "cmd args NULL");
+			return -EINVAL;
+		}
+		if (irq_cb->b_set_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb =
+				irq_cb->jpeg_hw_mgr_cb;
+			core_info->irq_cb.data = irq_cb->data;
+		} else {
+			core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+			core_info->irq_cb.data = NULL;
+		}
+		rc = 0;
+		break;
+	}
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data)
+{
+	return IRQ_HANDLED;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
new file mode 100644
index 0000000..bb4e34a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_DMA_CORE_H
+#define CAM_JPEG_DMA_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+struct cam_jpeg_dma_device_hw_info {
+	uint32_t reserved;
+};
+
+struct cam_jpeg_dma_set_irq_cb {
+	int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status,
+		int32_t result_size, void *data);
+	void *data;
+};
+
+enum cam_jpeg_dma_core_state {
+	CAM_JPEG_DMA_CORE_NOT_READY,
+	CAM_JPEG_DMA_CORE_READY,
+	CAM_JPEG_DMA_CORE_RESETTING,
+	CAM_JPEG_DMA_CORE_STATE_MAX,
+};
+
+struct cam_jpeg_dma_device_core_info {
+	enum cam_jpeg_dma_core_state core_state;
+	struct cam_jpeg_dma_device_hw_info *jpeg_dma_hw_info;
+	uint32_t cpas_handle;
+	struct cam_jpeg_dma_set_irq_cb irq_cb;
+};
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data);
+
+#endif /* CAM_JPEG_DMA_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
new file mode 100644
index 0000000..829bb51
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_hw_info = {
+	.reserved = 0,
+};
+EXPORT_SYMBOL(cam_jpeg_dma_hw_info);
+
+static int cam_jpeg_dma_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_jpeg_dma_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "jpeg-dma",
+		sizeof("jpeg-dma"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+static int cam_jpeg_dma_unregister_cpas(
+	struct cam_jpeg_dma_device_core_info *core_info)
+{
+	int rc;
+
+	rc = cam_cpas_unregister_client(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+	core_info->cpas_handle = 0;
+
+	return rc;
+}
+
+static int cam_jpeg_dma_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_dma_dev = NULL;
+	struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	jpeg_dma_dev_intf = platform_get_drvdata(pdev);
+	if (!jpeg_dma_dev_intf) {
+		CAM_ERR(CAM_JPEG, "error No data in pdev");
+		return -EINVAL;
+	}
+
+	jpeg_dma_dev = jpeg_dma_dev_intf->hw_priv;
+	if (!jpeg_dma_dev) {
+		CAM_ERR(CAM_JPEG, "error HW data is NULL");
+		rc = -ENODEV;
+		goto free_jpeg_hw_intf;
+	}
+
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_JPEG, "error core data NULL");
+		goto deinit_soc;
+	}
+
+	rc = cam_jpeg_dma_unregister_cpas(core_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas unregister failed %d", rc);
+
+	kfree(core_info);
+
+deinit_soc:
+	rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&jpeg_dma_dev->hw_mutex);
+	kfree(jpeg_dma_dev);
+
+free_jpeg_hw_intf:
+	kfree(jpeg_dma_dev_intf);
+	return rc;
+}
+
+static int cam_jpeg_dma_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_dma_dev = NULL;
+	struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	struct cam_jpeg_dma_device_hw_info *hw_info = NULL;
+	int rc;
+
+	jpeg_dma_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!jpeg_dma_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &jpeg_dma_dev_intf->hw_idx);
+
+	jpeg_dma_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!jpeg_dma_dev) {
+		rc = -ENOMEM;
+		goto error_alloc_dev;
+	}
+	jpeg_dma_dev->soc_info.pdev = pdev;
+	jpeg_dma_dev_intf->hw_priv = jpeg_dma_dev;
+	jpeg_dma_dev_intf->hw_ops.init = cam_jpeg_dma_init_hw;
+	jpeg_dma_dev_intf->hw_ops.deinit = cam_jpeg_dma_deinit_hw;
+	jpeg_dma_dev_intf->hw_ops.process_cmd = cam_jpeg_dma_process_cmd;
+	jpeg_dma_dev_intf->hw_type = CAM_JPEG_DEV_DMA;
+
+	platform_set_drvdata(pdev, jpeg_dma_dev_intf);
+	jpeg_dma_dev->core_info =
+		kzalloc(sizeof(struct cam_jpeg_dma_device_core_info),
+			GFP_KERNEL);
+	if (!jpeg_dma_dev->core_info) {
+		rc = -ENOMEM;
+		goto error_alloc_core;
+	}
+	core_info = (struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+		core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_JPEG, " No jpeg_dma hardware info");
+		rc = -EINVAL;
+		goto error_match_dev;
+	}
+	hw_info = (struct cam_jpeg_dma_device_hw_info *)match_dev->data;
+	core_info->jpeg_dma_hw_info = hw_info;
+	core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY;
+
+	rc = cam_jpeg_dma_init_soc_resources(&jpeg_dma_dev->soc_info,
+		cam_jpeg_dma_irq,
+		jpeg_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to init_soc %d", rc);
+		goto error_match_dev;
+	}
+
+	rc = cam_jpeg_dma_register_cpas(&jpeg_dma_dev->soc_info,
+		core_info, jpeg_dma_dev_intf->hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc);
+		goto error_reg_cpas;
+	}
+	jpeg_dma_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&jpeg_dma_dev->hw_mutex);
+	spin_lock_init(&jpeg_dma_dev->hw_lock);
+	init_completion(&jpeg_dma_dev->hw_complete);
+
+	CAM_DBG(CAM_JPEG, " hwidx %d", jpeg_dma_dev_intf->hw_idx);
+
+	return rc;
+
+error_reg_cpas:
+	rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+error_match_dev:
+	kfree(jpeg_dma_dev->core_info);
+error_alloc_core:
+	kfree(jpeg_dma_dev);
+error_alloc_dev:
+	kfree(jpeg_dma_dev_intf);
+	return rc;
+}
+
+static const struct of_device_id cam_jpeg_dma_dt_match[] = {
+	{
+		.compatible = "qcom,cam_jpeg_dma",
+		.data = &cam_jpeg_dma_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_dma_dt_match);
+
+static struct platform_driver cam_jpeg_dma_driver = {
+	.probe = cam_jpeg_dma_probe,
+	.remove = cam_jpeg_dma_remove,
+	.driver = {
+		.name = "cam-jpeg-dma",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_dma_dt_match,
+	},
+};
+
+static int __init cam_jpeg_dma_init_module(void)
+{
+	return platform_driver_register(&cam_jpeg_dma_driver);
+}
+
+static void __exit cam_jpeg_dma_exit_module(void)
+{
+	platform_driver_unregister(&cam_jpeg_dma_driver);
+}
+
+module_init(cam_jpeg_dma_init_module);
+module_exit(cam_jpeg_dma_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_DMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
new file mode 100644
index 0000000..efc161b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_dma_irq_handler, void *irq_data)
+{
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		jpeg_dma_irq_handler,
+		irq_data);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
new file mode 100644
index 0000000..bc9bed8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_DMA_SOC_H_
+#define _CAM_JPEG_DMA_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_dma_irq_handler, void *irq_data);
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_JPEG_DMA_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
new file mode 100644
index 0000000..b046a7f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_dev.o jpeg_enc_core.o jpeg_enc_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
new file mode 100644
index 0000000..25405cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
@@ -0,0 +1,348 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_enc_hw_intf.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+
+#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000
+#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
+
+#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
+#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b
+
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF      (0x1<<19)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR     (0x1<<20)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR   (0x1<<21)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW    (0x1<<23)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM       (0x1<<24)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ   (0x1<<25)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM       (0x1<<26)
+#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK        (0x1<<29)
+
+#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \
+		CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \
+		CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK
+#define CAM_JPEG_HW_MASK_COMP_ERR \
+		(CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \
+		CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK)
+
+#define CAM_JPEG_HW_IRQ_IS_FRAME_DONE(jpeg_irq_status) \
+	(jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_FRAMEDONE)
+#define CAM_JPEG_HW_IRQ_IS_RESET_ACK(jpeg_irq_status) \
+	(jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_RESET_ACK)
+#define CAM_JPEG_HW_IRQ_IS_ERR(jpeg_irq_status) \
+	(jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_ERR)
+
+#define CAM_JPEG_ENC_RESET_TIMEOUT msecs_to_jiffies(500)
+
+int cam_jpeg_enc_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_jpeg_cpas_vote cpas_vote;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.compressed_bw = JPEG_TURBO_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = JPEG_TURBO_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas start failed: %d", rc);
+
+	rc = cam_jpeg_enc_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "soc enable failed %d", rc);
+		cam_cpas_stop(core_info->cpas_handle);
+	}
+
+	return rc;
+}
+
+int cam_jpeg_enc_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_jpeg_enc_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
+
+	return 0;
+}
+
+irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	uint32_t irq_status = 0;
+	uint32_t encoded_size = 0;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return IRQ_HANDLED;
+	}
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
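+	/* Read and clear the pending interrupt status */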
+	irq_status = cam_io_r_mb(mem_base +
+		core_info->jpeg_enc_hw_info->int_status);
+
+	cam_io_w_mb(irq_status,
+		soc_info->reg_map[0].mem_base +
+		core_info->jpeg_enc_hw_info->int_clr);
+
+	CAM_DBG(CAM_JPEG, "irq_num %d  irq_status = %x , core_state %d",
+		irq_num, irq_status, core_info->core_state);
+	if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status)) {
+		if (core_info->core_state == CAM_JPEG_ENC_CORE_READY) {
+			encoded_size = cam_io_r_mb(mem_base + 0x180);
+			if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+				core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+					encoded_size,
+					core_info->irq_cb.data);
+			} else {
+				CAM_ERR(CAM_JPEG, "unexpected done");
+			}
+		}
+
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	}
+	if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status)) {
+		if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+			core_info->core_state = CAM_JPEG_ENC_CORE_READY;
+			complete(&jpeg_enc_dev->hw_complete);
+		} else {
+			CAM_ERR(CAM_JPEG, "unexpected reset irq");
+		}
+	}
+	/* Unexpected/unintended HW interrupt */
+	if (CAM_JPEG_HW_IRQ_IS_ERR(irq_status)) {
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+		CAM_ERR_RATE_LIMIT(CAM_JPEG,
+			"error irq_num %d  irq_status = %x , core_state %d",
+			irq_num, irq_status, core_info->core_state);
+
+		if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+				-1,
+				core_info->irq_cb.data);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_jpeg_enc_reset_hw(void *data,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+	unsigned long rem_jiffies;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	/* Reset sequence: disable irq mask, clear irqs, enable mask, reset */
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+		CAM_ERR(CAM_JPEG, "already resetting");
+		return 0;
+	}
+
+	reinit_completion(&jpeg_enc_dev->hw_complete);
+
+	core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING;
+
+	cam_io_w_mb(0x00000000, mem_base + hw_info->int_mask);
+	cam_io_w_mb(0xFFFFFFFF, mem_base + hw_info->int_clr);
+	cam_io_w_mb(0xFFFFFFFF, mem_base + hw_info->int_mask);
+	cam_io_w_mb(0x00032093, mem_base + hw_info->reset_cmd);
+
+	rem_jiffies = wait_for_completion_timeout(&jpeg_enc_dev->hw_complete,
+		CAM_JPEG_ENC_RESET_TIMEOUT);
+	if (!rem_jiffies) {
+		CAM_ERR(CAM_JPEG, "error Reset Timeout");
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	}
+
+	return 0;
+}
+
+int cam_jpeg_enc_start_hw(void *data,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	if (core_info->core_state != CAM_JPEG_ENC_CORE_READY) {
+		CAM_ERR(CAM_JPEG, "Error not ready");
+		return -EINVAL;
+	}
+
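+	/* Issue the encode start command; the 0x10 offset is hard-coded here */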
+	cam_io_w_mb(0x00000001, mem_base + 0x00000010);
+
+	return 0;
+}
+
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_JPEG_ENC_CMD_MAX) {
+		CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+
+	switch (cmd_type) {
+	case CAM_JPEG_ENC_CMD_SET_IRQ_CB:
+	{
+		struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_JPEG, "cmd args NULL");
+			return -EINVAL;
+		}
+		if (irq_cb->b_set_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb =
+				irq_cb->jpeg_hw_mgr_cb;
+			core_info->irq_cb.data = irq_cb->data;
+		} else {
+			core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+			core_info->irq_cb.data = NULL;
+		}
+		rc = 0;
+		break;
+	}
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	if (rc)
+		CAM_ERR(CAM_JPEG, "error cmdtype %d rc = %d", cmd_type, rc);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
new file mode 100644
index 0000000..6ae4cdc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_ENC_CORE_H
+#define CAM_JPEG_ENC_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+struct cam_jpeg_enc_device_hw_info {
+	uint32_t hw_version;
+	uint32_t int_status;
+	uint32_t int_clr;
+	uint32_t int_mask;
+	uint32_t reset_cmd;
+};
+
+struct cam_jpeg_enc_set_irq_cb {
+	int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status,
+		int32_t result_size, void *data);
+	void *data;
+};
+
+enum cam_jpeg_enc_core_state {
+	CAM_JPEG_ENC_CORE_NOT_READY,
+	CAM_JPEG_ENC_CORE_READY,
+	CAM_JPEG_ENC_CORE_RESETTING,
+	CAM_JPEG_ENC_CORE_STATE_MAX,
+};
+
+struct cam_jpeg_enc_device_core_info {
+	enum cam_jpeg_enc_core_state core_state;
+	struct cam_jpeg_enc_device_hw_info *jpeg_enc_hw_info;
+	uint32_t cpas_handle;
+	struct cam_jpeg_enc_set_irq_cb irq_cb;
+};
+
+int cam_jpeg_enc_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_start_hw(void *device_priv,
+	void *start_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_reset_hw(void *device_priv,
+	void *reset_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data);
+
+#endif /* CAM_JPEG_ENC_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
new file mode 100644
index 0000000..5dd1e1f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
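+/* JPEG encoder register offsets (interrupt status/clear/mask, reset). */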
+static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_hw_info = {
+	.int_clr = 0x1c,
+	.int_status = 0x20,
+	.int_mask = 0x18,
+	.reset_cmd = 0x8,
+	.hw_version = 0x0,
+};
+EXPORT_SYMBOL(cam_jpeg_enc_hw_info);
+
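+/*
+ * Register this encoder instance as a CPAS client and store the returned
+ * client handle in the core info.
+ */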
+static int cam_jpeg_enc_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_jpeg_enc_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "jpeg-enc",
+		sizeof("jpeg-enc"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+static int cam_jpeg_enc_unregister_cpas(
+	struct cam_jpeg_enc_device_core_info *core_info)
+{
+	int rc;
+
+	rc = cam_cpas_unregister_client(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+	core_info->cpas_handle = 0;
+
+	return rc;
+}
+
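+/* Unwind probe: unregister from CPAS, release SOC resources, free memory. */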
+static int cam_jpeg_enc_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_enc_dev = NULL;
+	struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	jpeg_enc_dev_intf = platform_get_drvdata(pdev);
+	if (!jpeg_enc_dev_intf) {
+		CAM_ERR(CAM_JPEG, "Error: No data in pdev");
+		return -EINVAL;
+	}
+
+	jpeg_enc_dev = jpeg_enc_dev_intf->hw_priv;
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Error: HW data is NULL");
+		rc = -ENODEV;
+		goto free_jpeg_hw_intf;
+	}
+
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_JPEG, "Error: core data is NULL");
+		goto deinit_soc;
+	}
+
+	rc = cam_jpeg_enc_unregister_cpas(core_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to unregister cpas %d", rc);
+
+	kfree(core_info);
+
+deinit_soc:
+	rc = cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&jpeg_enc_dev->hw_mutex);
+	kfree(jpeg_enc_dev);
+
+free_jpeg_hw_intf:
+	kfree(jpeg_enc_dev_intf);
+	return rc;
+}
+
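+/*
+ * Allocate the hw interface and hw info, wire up the hw_ops callbacks,
+ * request SOC resources (register space, clocks, regulators, IRQ) and
+ * register the encoder with CPAS.
+ */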
+static int cam_jpeg_enc_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_enc_dev = NULL;
+	struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	int rc;
+
+	jpeg_enc_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!jpeg_enc_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &jpeg_enc_dev_intf->hw_idx);
+
+	jpeg_enc_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!jpeg_enc_dev) {
+		rc = -ENOMEM;
+		goto error_alloc_dev;
+	}
+	jpeg_enc_dev->soc_info.pdev = pdev;
+	jpeg_enc_dev_intf->hw_priv = jpeg_enc_dev;
+	jpeg_enc_dev_intf->hw_ops.init = cam_jpeg_enc_init_hw;
+	jpeg_enc_dev_intf->hw_ops.deinit = cam_jpeg_enc_deinit_hw;
+	jpeg_enc_dev_intf->hw_ops.start = cam_jpeg_enc_start_hw;
+	jpeg_enc_dev_intf->hw_ops.reset = cam_jpeg_enc_reset_hw;
+	jpeg_enc_dev_intf->hw_ops.process_cmd = cam_jpeg_enc_process_cmd;
+	jpeg_enc_dev_intf->hw_type = CAM_JPEG_DEV_ENC;
+
+	platform_set_drvdata(pdev, jpeg_enc_dev_intf);
+	jpeg_enc_dev->core_info =
+		kzalloc(sizeof(struct cam_jpeg_enc_device_core_info),
+			GFP_KERNEL);
+	if (!jpeg_enc_dev->core_info) {
+		rc = -ENOMEM;
+		goto error_alloc_core;
+	}
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_JPEG, "No jpeg_enc hardware info");
+		rc = -EINVAL;
+		goto error_match_dev;
+	}
+	hw_info = (struct cam_jpeg_enc_device_hw_info *)match_dev->data;
+	core_info->jpeg_enc_hw_info = hw_info;
+	core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+
+	rc = cam_jpeg_enc_init_soc_resources(&jpeg_enc_dev->soc_info,
+		cam_jpeg_enc_irq,
+		jpeg_enc_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to init soc, rc %d", rc);
+		goto error_match_dev;
+	}
+
+	rc = cam_jpeg_enc_register_cpas(&jpeg_enc_dev->soc_info,
+		core_info, jpeg_enc_dev_intf->hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to register cpas %d", rc);
+		goto error_reg_cpas;
+	}
+	jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&jpeg_enc_dev->hw_mutex);
+	spin_lock_init(&jpeg_enc_dev->hw_lock);
+	init_completion(&jpeg_enc_dev->hw_complete);
+
+	return rc;
+
+error_reg_cpas:
+	cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+error_match_dev:
+	kfree(jpeg_enc_dev->core_info);
+error_alloc_core:
+	kfree(jpeg_enc_dev);
+error_alloc_dev:
+	kfree(jpeg_enc_dev_intf);
+
+	return rc;
+}
+
+static const struct of_device_id cam_jpeg_enc_dt_match[] = {
+	{
+		.compatible = "qcom,cam_jpeg_enc",
+		.data = &cam_jpeg_enc_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_enc_dt_match);
+
+static struct platform_driver cam_jpeg_enc_driver = {
+	.probe = cam_jpeg_enc_probe,
+	.remove = cam_jpeg_enc_remove,
+	.driver = {
+		.name = "cam-jpeg-enc",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_enc_dt_match,
+	},
+};
+
+static int __init cam_jpeg_enc_init_module(void)
+{
+	return platform_driver_register(&cam_jpeg_enc_driver);
+}
+
+static void __exit cam_jpeg_enc_exit_module(void)
+{
+	platform_driver_unregister(&cam_jpeg_enc_driver);
+}
+
+module_init(cam_jpeg_enc_init_module);
+module_exit(cam_jpeg_enc_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_ENC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
new file mode 100644
index 0000000..3f450cd
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
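+/*
+ * Read the encoder's DT properties and request its platform resources,
+ * installing the supplied IRQ handler.
+ */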
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_enc_irq_handler, void *irq_data)
+{
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		jpeg_enc_irq_handler,
+		irq_data);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+	return rc;
+}
+
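+/* Power up: enable regulators, clocks at the SVS level and the IRQ. */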
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+	return rc;
+}
+
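+/* Power down: disable the encoder's clocks and regulators. */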
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
new file mode 100644
index 0000000..a0485a2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_ENC_SOC_H_
+#define _CAM_JPEG_ENC_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_enc_irq_handler, void *irq_data);
+
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_JPEG_ENC_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 1dcc54f..18097b0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -500,7 +500,7 @@
 	}
 		break;
 	case CAM_QUERY_CAP: {
-		struct cam_actuator_query_cap actuator_cap;
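+		/* Zero-init so no uninitialized stack data reaches userspace. */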
+		struct cam_actuator_query_cap actuator_cap = {0};
 
 		actuator_cap.slot_info = a_ctrl->id;
 		if (copy_to_user((void __user *) cmd->handle, &actuator_cap,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 2655202..fcf76c8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -385,7 +385,7 @@
 	}
 		break;
 	case CAM_QUERY_CAP: {
-		struct cam_csiphy_query_cap csiphy_cap;
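+		/* Zero-init so no uninitialized stack data reaches userspace. */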
+		struct cam_csiphy_query_cap csiphy_cap = {0};
 
 		cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
 		if (copy_to_user((void __user *)cmd->handle,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 40a8c179..06590e4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -571,6 +571,7 @@
 	if (!power_info->power_setting)
 		return -ENOMEM;
 
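+	/* Clear stale power-down count before allocating the table. */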
+	power_info->power_down_setting_size = 0;
 	power_info->power_down_setting =
 		(struct cam_sensor_power_setting *)
 		kzalloc(sizeof(struct cam_sensor_power_setting) *
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
index 3619da7..e17dac6 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/Makefile
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -1 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index ca0dfac..ff7a0e5 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__
-
 #include <linux/module.h>
 #include <linux/dma-buf.h>
 #include <asm/dma-iommu.h>
@@ -25,6 +23,7 @@
 #include <linux/genalloc.h>
 
 #include "cam_smmu_api.h"
+#include "cam_debug_util.h"
 
 #define SHARED_MEM_POOL_GRANULARITY 12
 
@@ -39,12 +38,6 @@
 #define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
 #define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
 
-#ifdef CONFIG_CAM_SMMU_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
 struct firmware_alloc_info {
 	struct device *fw_dev;
 	void *fw_kva;
@@ -223,7 +216,7 @@
 
 	mutex_lock(&iommu_cb_set.payload_list_lock);
 	if (list_empty(&iommu_cb_set.payload_list)) {
-		pr_err("Payload list empty\n");
+		CAM_ERR(CAM_SMMU, "Payload list empty");
 		mutex_unlock(&iommu_cb_set.payload_list_lock);
 		return;
 	}
@@ -256,10 +249,11 @@
 {
 	struct cam_dma_buff_info *mapping;
 
-	pr_err("index = %d\n", idx);
+	CAM_ERR(CAM_SMMU, "index = %d", idx);
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
-		pr_err("ion_fd = %d, paddr= 0x%pK, len = %u, region = %d\n",
+		CAM_ERR(CAM_SMMU,
+			"ion_fd = %d, paddr= 0x%pK, len = %u, region = %d",
 			 mapping->ion_fd, (void *)mapping->paddr,
 			 (unsigned int)mapping->len,
 			 mapping->region_id);
@@ -271,10 +265,10 @@
 	int i;
 
 	for (i = 0; i < iommu_cb_set.cb_num; i++) {
-		pr_err("i= %d, handle= %d, name_addr=%pK\n", i,
+		CAM_ERR(CAM_SMMU, "i= %d, handle= %d, name_addr=%pK", i,
 			   (int)iommu_cb_set.cb_info[i].handle,
 			   (void *)iommu_cb_set.cb_info[i].name);
-		pr_err("dev = %pK\n", iommu_cb_set.cb_info[i].dev);
+		CAM_ERR(CAM_SMMU, "dev = %pK", iommu_cb_set.cb_info[i].dev);
 	}
 }
 
@@ -290,18 +284,21 @@
 		end_addr = (unsigned long)mapping->paddr + mapping->len;
 
 		if (start_addr <= current_addr && current_addr < end_addr) {
-			pr_err("va %pK valid: range:%pK-%pK, fd = %d cb: %s\n",
+			CAM_ERR(CAM_SMMU,
+				"va %pK valid: range:%pK-%pK, fd = %d cb: %s",
 				vaddr, (void *)start_addr, (void *)end_addr,
 				mapping->ion_fd,
 				iommu_cb_set.cb_info[idx].name);
 			goto end;
 		} else {
-			CDBG("va %pK is not in this range: %pK-%pK, fd = %d\n",
+			CAM_DBG(CAM_SMMU,
+				"va %pK is not in this range: %pK-%pK, fd = %d",
 				vaddr, (void *)start_addr, (void *)end_addr,
 				mapping->ion_fd);
 		}
 	}
-	pr_err("Cannot find vaddr:%pK in SMMU %s uses invalid virt address\n",
+	CAM_ERR(CAM_SMMU,
+		"Cannot find vaddr:%pK in SMMU %s uses invalid virt address",
 		vaddr, iommu_cb_set.cb_info[idx].name);
 end:
 	return;
@@ -315,20 +312,22 @@
 	int idx, i = 0;
 
 	if (!token || (handle == HANDLE_INIT)) {
-		pr_err("Error: token is NULL or invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
 		return;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return;
@@ -336,7 +335,8 @@
 
 	if (client_page_fault_handler) {
 		if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
-			pr_err("%s Should not regiester more handlers\n",
+			CAM_ERR(CAM_SMMU,
+				"%s Should not register more handlers",
 				iommu_cb_set.cb_info[idx].name);
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return;
@@ -361,7 +361,8 @@
 			}
 		}
 		if (i == CAM_SMMU_CB_MAX)
-			pr_err("Error: hdl %x no matching tokens: %s\n",
+			CAM_ERR(CAM_SMMU,
+				"Error: hdl %x no matching tokens: %s",
 				handle, iommu_cb_set.cb_info[idx].name);
 	}
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -376,9 +377,10 @@
 	struct cam_smmu_work_payload *payload;
 
 	if (!token) {
-		pr_err("Error: token is NULL\n");
-		pr_err("Error: domain = %pK, device = %pK\n", domain, dev);
-		pr_err("iova = %lX, flags = %d\n", iova, flags);
+		CAM_ERR(CAM_SMMU, "Error: token is NULL");
+		CAM_ERR(CAM_SMMU, "Error: domain = %pK, device = %pK",
+			domain, dev);
+		CAM_ERR(CAM_SMMU, "iova = %lX, flags = %d", iova, flags);
 		return 0;
 	}
 
@@ -390,7 +392,8 @@
 	}
 
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: index is not valid, index = %d, token = %s\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: index is not valid, index = %d, token = %s",
 			idx, cb_name);
 		return 0;
 	}
@@ -427,7 +430,7 @@
 		return IOMMU_READ|IOMMU_WRITE;
 	case CAM_SMMU_MAP_INVALID:
 	default:
-		pr_err("Error: Direction is invalid. dir = %d\n", dir);
+		CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d", dir);
 		break;
 	};
 	return IOMMU_INVALID_DIR;
@@ -445,7 +448,8 @@
 		return DMA_BIDIRECTIONAL;
 	case CAM_SMMU_MAP_INVALID:
 	default:
-		pr_err("Error: Direction is invalid. dir = %d\n", (int)dir);
+		CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d",
+			(int)dir);
 		break;
 	}
 	return DMA_NONE;
@@ -478,7 +482,8 @@
 	int i;
 
 	if (hdl == HANDLE_INIT) {
-		CDBG("iommu handle is init number. Need to try again\n");
+		CAM_DBG(CAM_SMMU,
+			"iommu handle is init number. Need to try again");
 		return 1;
 	}
 
@@ -487,7 +492,8 @@
 			continue;
 
 		if (iommu_cb_set.cb_info[i].handle == hdl) {
-			CDBG("iommu handle %d conflicts\n", (int)hdl);
+			CAM_DBG(CAM_SMMU, "iommu handle %d conflicts",
+				(int)hdl);
 			return 1;
 		}
 	}
@@ -503,7 +509,7 @@
 
 	get_random_bytes(&rand, COOKIE_NUM_BYTE);
 	hdl = GET_SMMU_HDL(idx, rand);
-	CDBG("create handle value = %x\n", (int)hdl);
+	CAM_DBG(CAM_SMMU, "create handle value = %x", (int)hdl);
 	return hdl;
 }
 
@@ -515,7 +521,8 @@
 	/* attach the mapping to device */
 	rc = arm_iommu_attach_device(cb->dev, cb->mapping);
 	if (rc < 0) {
-		pr_err("Error: ARM IOMMU attach failed. ret = %d\n", rc);
+		CAM_ERR(CAM_SMMU, "Error: ARM IOMMU attach failed. ret = %d",
+			rc);
 		rc = -ENODEV;
 	}
 
@@ -533,7 +540,8 @@
 		if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
 			mutex_lock(&iommu_cb_set.cb_info[i].lock);
 			if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
-				pr_err("Error: %s already got handle 0x%x\n",
+				CAM_ERR(CAM_SMMU,
+					"Error: %s already got handle 0x%x",
 					name,
 					iommu_cb_set.cb_info[i].handle);
 				mutex_unlock(&iommu_cb_set.cb_info[i].lock);
@@ -549,14 +557,15 @@
 			iommu_cb_set.cb_info[i].handle = handle;
 			iommu_cb_set.cb_info[i].cb_count = 0;
 			*hdl = handle;
-			CDBG("%s creates handle 0x%x\n", name, handle);
+			CAM_DBG(CAM_SMMU, "%s creates handle 0x%x",
+				name, handle);
 			mutex_unlock(&iommu_cb_set.cb_info[i].lock);
 			return 0;
 		}
 	}
 
-	pr_err("Error: Cannot find name %s or all handle exist!\n",
-			name);
+	CAM_ERR(CAM_SMMU, "Error: Cannot find name %s or all handles exist",
+		name);
 	cam_smmu_print_table();
 	return -EINVAL;
 }
@@ -571,7 +580,8 @@
 
 	if (!count) {
 		err = -EINVAL;
-		pr_err("Page count is zero, size passed = %zu\n", size);
+		CAM_ERR(CAM_SMMU, "Page count is zero, size passed = %zu",
+			size);
 		goto bail;
 	}
 
@@ -630,12 +640,12 @@
 			      (1 << mapping->order) - 1) >> mapping->order;
 
 	if (!addr) {
-		pr_err("Error: Invalid address\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid address");
 		return -EINVAL;
 	}
 
 	if (start + count > mapping->bits) {
-		pr_err("Error: Invalid page bits in scratch map\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid page bits in scratch map");
 		return -EINVAL;
 	}
 
@@ -657,13 +667,13 @@
 	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
 			list) {
 		if (mapping->paddr == virt_addr) {
-			CDBG("Found virtual address %lx\n",
+			CAM_DBG(CAM_SMMU, "Found virtual address %lx",
 				 (unsigned long)virt_addr);
 			return mapping;
 		}
 	}
 
-	pr_err("Error: Cannot find virtual address %lx by index %d\n",
+	CAM_ERR(CAM_SMMU, "Error: Cannot find virtual address %lx by index %d",
 		(unsigned long)virt_addr, idx);
 	return NULL;
 }
@@ -676,12 +686,12 @@
 	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
 			list) {
 		if (mapping->ion_fd == ion_fd) {
-			CDBG(" find ion_fd %d\n", ion_fd);
+			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
 			return mapping;
 		}
 	}
 
-	pr_err("Error: Cannot find fd %d by index %d\n",
+	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
 		ion_fd, idx);
 	return NULL;
 }
@@ -693,7 +703,7 @@
 
 	list_for_each_entry_safe(mapping_info, temp,
 			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
-		CDBG("Free mapping address %pK, i = %d, fd = %d\n",
+		CAM_DBG(CAM_SMMU, "Free mapping address %pK, i = %d, fd = %d",
 			(void *)mapping_info->paddr, idx,
 			mapping_info->ion_fd);
 
@@ -708,10 +718,12 @@
 					idx);
 
 		if (ret < 0) {
-			pr_err("Buffer delete failed: idx = %d\n", idx);
-			pr_err("Buffer delete failed: addr = %lx, fd = %d\n",
-					(unsigned long)mapping_info->paddr,
-					mapping_info->ion_fd);
+			CAM_ERR(CAM_SMMU, "Buffer delete failed: idx = %d",
+				idx);
+			CAM_ERR(CAM_SMMU,
+				"Buffer delete failed: addr = %lx, fd = %d",
+				(unsigned long)mapping_info->paddr,
+				mapping_info->ion_fd);
 			/*
 			 * Ignore this error and continue to delete other
 			 * buffers in the list
@@ -730,13 +742,13 @@
 	} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
 		ret = cam_smmu_attach_device(idx);
 		if (ret < 0) {
-			pr_err("Error: ATTACH fail\n");
+			CAM_ERR(CAM_SMMU, "Error: ATTACH fail");
 			return -ENODEV;
 		}
 		iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
 		ret = 0;
 	} else {
-		pr_err("Error: Not detach/attach: %d\n",
+		CAM_ERR(CAM_SMMU, "Error: Not detach/attach: %d",
 			iommu_cb_set.cb_info[idx].state);
 		ret = -EINVAL;
 	}
@@ -768,28 +780,32 @@
 	uint32_t vaddr = 0;
 
 	if (!iova || !size || (smmu_hdl == HANDLE_INIT)) {
-		pr_err("Error: Input args are invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
 		return -EINVAL;
 	}
 
-	CDBG("Allocating iova size = %zu for smmu hdl=%X\n", size, smmu_hdl);
+	CAM_DBG(CAM_SMMU, "Allocating iova size = %zu for smmu hdl=%X",
+		size, smmu_hdl);
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		return -EINVAL;
 	}
 
 	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
 		rc = -EINVAL;
 		goto get_addr_end;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].shared_support) {
-		pr_err("Error: Shared memory not supported for hdl = %X\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: Shared memory not supported for hdl = %X",
 			smmu_hdl);
 		rc = -EINVAL;
 		goto get_addr_end;
@@ -812,19 +828,21 @@
 	int idx;
 
 	if (!size || (smmu_hdl == HANDLE_INIT)) {
-		pr_err("Error: Input args are invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		return -EINVAL;
 	}
 
 	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
 		rc = -EINVAL;
 		goto get_addr_end;
@@ -848,45 +866,47 @@
 	struct iommu_domain *domain;
 
 	if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) {
-		pr_err("Error: Input args are invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].firmware_support) {
-		pr_err("Firmware memory not supported for this SMMU handle\n");
+		CAM_ERR(CAM_SMMU,
+			"Firmware memory not supported for this SMMU handle");
 		rc = -EINVAL;
 		goto end;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].is_fw_allocated) {
-		pr_err("Trying to allocate twice\n");
+		CAM_ERR(CAM_SMMU, "Trying to allocate twice");
 		rc = -ENOMEM;
 		goto unlock_and_end;
 	}
 
 	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
 	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
-	CDBG("Firmware area len from DT = %zu\n", firmware_len);
+	CAM_DBG(CAM_SMMU, "Firmware area len from DT = %zu", firmware_len);
 
 	icp_fw.fw_kva = dma_alloc_coherent(icp_fw.fw_dev,
 		firmware_len,
 		&icp_fw.fw_dma_hdl,
 		GFP_KERNEL);
 	if (!icp_fw.fw_kva) {
-		pr_err("FW memory alloc failed\n");
+		CAM_ERR(CAM_SMMU, "FW memory alloc failed");
 		rc = -ENOMEM;
 		goto unlock_and_end;
 	} else {
-		CDBG("DMA alloc returned fw = %pK, hdl = %pK\n",
+		CAM_DBG(CAM_SMMU, "DMA alloc returned fw = %pK, hdl = %pK",
 			icp_fw.fw_kva, (void *)icp_fw.fw_dma_hdl);
 	}
 
@@ -898,7 +918,7 @@
 		IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
 
 	if (rc) {
-		pr_err("Failed to map FW into IOMMU\n");
+		CAM_ERR(CAM_SMMU, "Failed to map FW into IOMMU");
 		rc = -ENOMEM;
 		goto alloc_fail;
 	}
@@ -933,27 +953,30 @@
 	size_t unmapped = 0;
 
 	if (smmu_hdl == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].firmware_support) {
-		pr_err("Firmware memory not supported for this SMMU handle\n");
+		CAM_ERR(CAM_SMMU,
+			"Firmware memory not supported for this SMMU handle");
 		rc = -EINVAL;
 		goto end;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (!iommu_cb_set.cb_info[idx].is_fw_allocated) {
-		pr_err("Trying to deallocate firmware that is not allocated\n");
+		CAM_ERR(CAM_SMMU,
+			"Trying to deallocate firmware that is not allocated");
 		rc = -ENOMEM;
 		goto unlock_and_end;
 	}
@@ -966,7 +989,7 @@
 		firmware_len);
 
 	if (unmapped != firmware_len) {
-		pr_err("Only %zu unmapped out of total %zu\n",
+		CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu",
 			unmapped,
 			firmware_len);
 		rc = -EINVAL;
@@ -997,18 +1020,18 @@
 	struct cam_context_bank_info *cb = NULL;
 
 	if (!region_info) {
-		pr_err("Invalid region_info pointer\n");
+		CAM_ERR(CAM_SMMU, "Invalid region_info pointer");
 		return -EINVAL;
 	}
 
 	if (smmu_hdl == HANDLE_INIT) {
-		pr_err("Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Invalid handle");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU, "Handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		return -EINVAL;
 	}
@@ -1016,7 +1039,7 @@
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	cb = &iommu_cb_set.cb_info[idx];
 	if (!cb) {
-		pr_err("SMMU context bank pointer invalid\n");
+		CAM_ERR(CAM_SMMU, "SMMU context bank pointer invalid");
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return -EINVAL;
 	}
@@ -1024,7 +1047,7 @@
 	switch (region_id) {
 	case CAM_SMMU_REGION_FIRMWARE:
 		if (!cb->firmware_support) {
-			pr_err("Firmware not supported\n");
+			CAM_ERR(CAM_SMMU, "Firmware not supported");
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return -ENODEV;
 		}
@@ -1033,7 +1056,7 @@
 		break;
 	case CAM_SMMU_REGION_SHARED:
 		if (!cb->shared_support) {
-			pr_err("Shared mem not supported\n");
+			CAM_ERR(CAM_SMMU, "Shared mem not supported");
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return -ENODEV;
 		}
@@ -1042,7 +1065,7 @@
 		break;
 	case CAM_SMMU_REGION_SCRATCH:
 		if (!cb->scratch_buf_support) {
-			pr_err("Scratch memory not supported\n");
+			CAM_ERR(CAM_SMMU, "Scratch memory not supported");
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return -ENODEV;
 		}
@@ -1051,7 +1074,7 @@
 		break;
 	case CAM_SMMU_REGION_IO:
 		if (!cb->io_support) {
-			pr_err("IO memory not supported\n");
+			CAM_ERR(CAM_SMMU, "IO memory not supported");
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return -ENODEV;
 		}
@@ -1059,7 +1082,7 @@
 		region_info->iova_len = cb->io_info.iova_len;
 		break;
 	default:
-		pr_err("Invalid region id: %d for smmu hdl: %X\n",
+		CAM_ERR(CAM_SMMU, "Invalid region id: %d for smmu hdl: %X",
 			smmu_hdl, region_id);
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return -EINVAL;
@@ -1088,28 +1111,28 @@
 	buf = dma_buf_get(ion_fd);
 	if (IS_ERR_OR_NULL(buf)) {
 		rc = PTR_ERR(buf);
-		pr_err("Error: dma get buf failed. fd = %d\n", ion_fd);
+		CAM_ERR(CAM_SMMU, "Error: dma get buf failed. fd = %d", ion_fd);
 		goto err_out;
 	}
 
 	attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
 	if (IS_ERR_OR_NULL(attach)) {
 		rc = PTR_ERR(attach);
-		pr_err("Error: dma buf attach failed\n");
+		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
 		goto err_put;
 	}
 
 	table = dma_buf_map_attachment(attach, dma_dir);
 	if (IS_ERR_OR_NULL(table)) {
 		rc = PTR_ERR(table);
-		pr_err("Error: dma buf map attachment failed\n");
+		CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
 		goto err_detach;
 	}
 
 	if (region_id == CAM_SMMU_REGION_SHARED) {
 		domain = iommu_cb_set.cb_info[idx].mapping->domain;
 		if (!domain) {
-			pr_err("CB has no domain set\n");
+			CAM_ERR(CAM_SMMU, "CB has no domain set");
 			goto err_unmap_sg;
 		}
 
@@ -1118,7 +1141,8 @@
 			&iova);
 
 		if (rc < 0) {
-			pr_err("IOVA alloc failed for shared memory\n");
+			CAM_ERR(CAM_SMMU,
+				"IOVA alloc failed for shared memory");
 			goto err_unmap_sg;
 		}
 
@@ -1129,17 +1153,17 @@
 			IOMMU_READ | IOMMU_WRITE);
 
 		if (size < 0) {
-			pr_err("IOMMU mapping failed\n");
+			CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
 			rc = cam_smmu_free_iova(iova,
 				size,
 				iommu_cb_set.cb_info[idx].handle);
 
 			if (rc)
-				pr_err("IOVA free failed\n");
+				CAM_ERR(CAM_SMMU, "IOVA free failed");
 			rc = -ENOMEM;
 			goto err_unmap_sg;
 		} else {
-			CDBG("iommu_map_sg returned %zu\n", size);
+			CAM_DBG(CAM_SMMU, "iommu_map_sg returned %zu", size);
 			*paddr_ptr = iova;
 			*len_ptr = size;
 		}
@@ -1148,7 +1172,7 @@
 			table->sgl, table->nents, dma_dir, buf);
 
 		if (rc != table->nents) {
-			pr_err("Error: msm_dma_map_sg_lazy failed\n");
+			CAM_ERR(CAM_SMMU, "Error: msm_dma_map_sg_lazy failed");
 			rc = -ENOMEM;
 			goto err_unmap_sg;
 		} else {
@@ -1156,22 +1180,23 @@
 			*len_ptr = (size_t)sg_dma_len(table->sgl);
 		}
 	} else {
-		pr_err("Error: Wrong region id passed for %s\n", __func__);
+		CAM_ERR(CAM_SMMU, "Error: Wrong region id passed");
 		rc = -EINVAL;
 		goto err_unmap_sg;
 	}
 
 	if (table->sgl) {
-		CDBG("DMA buf: %pK, device: %pK, attach: %pK, table: %pK\n",
-				(void *)buf,
-				(void *)iommu_cb_set.cb_info[idx].dev,
-				(void *)attach, (void *)table);
-		CDBG("table sgl: %pK, rc: %d, dma_address: 0x%x\n",
-				(void *)table->sgl, rc,
-				(unsigned int)table->sgl->dma_address);
+		CAM_DBG(CAM_SMMU,
+			"DMA buf: %pK, device: %pK, attach: %pK, table: %pK",
+			(void *)buf,
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)attach, (void *)table);
+		CAM_DBG(CAM_SMMU, "table sgl: %pK, rc: %d, dma_address: 0x%x",
+			(void *)table->sgl, rc,
+			(unsigned int)table->sgl->dma_address);
 	} else {
 		rc = -EINVAL;
-		pr_err("Error: table sgl is null\n");
+		CAM_ERR(CAM_SMMU, "Error: table sgl is null");
 		goto err_unmap_sg;
 	}
 
@@ -1192,13 +1217,13 @@
 	mapping_info->region_id = region_id;
 
 	if (!*paddr_ptr || !*len_ptr) {
-		pr_err("Error: Space Allocation failed!\n");
+		CAM_ERR(CAM_SMMU, "Error: Space Allocation failed");
 		kfree(mapping_info);
 		rc = -ENOSPC;
 		goto err_alloc;
 	}
-	CDBG("ion_fd = %d, dev = %pK, paddr= %pK, len = %u\n", ion_fd,
-		(void *)iommu_cb_set.cb_info[idx].dev,
+	CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u",
+		ion_fd, (void *)iommu_cb_set.cb_info[idx].dev,
 		(void *)*paddr_ptr, (unsigned int)*len_ptr);
 
 	/* add to the list */
@@ -1241,17 +1266,19 @@
 
 	if ((!mapping_info->buf) || (!mapping_info->table) ||
 		(!mapping_info->attach)) {
-		pr_err("Error: Invalid params dev = %pK, table = %pK\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params dev = %pK, table = %pK",
 			(void *)iommu_cb_set.cb_info[idx].dev,
 			(void *)mapping_info->table);
-		pr_err("Error:dma_buf = %pK, attach = %pK\n",
+		CAM_ERR(CAM_SMMU, "Error:dma_buf = %pK, attach = %pK",
 			(void *)mapping_info->buf,
 			(void *)mapping_info->attach);
 		return -EINVAL;
 	}
 
 	if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
-		CDBG("Removing SHARED buffer paddr = %pK, len = %zu\n",
+		CAM_DBG(CAM_SMMU,
+			"Removing SHARED buffer paddr = %pK, len = %zu",
 			(void *)mapping_info->paddr, mapping_info->len);
 
 		domain = iommu_cb_set.cb_info[idx].mapping->domain;
@@ -1261,8 +1288,8 @@
 			mapping_info->len);
 
 		if (size != mapping_info->len) {
-			pr_err("IOMMU unmap failed\n");
-			pr_err("Unmapped = %zu, requested = %zu\n",
+			CAM_ERR(CAM_SMMU, "IOMMU unmap failed");
+			CAM_ERR(CAM_SMMU, "Unmapped = %zu, requested = %zu",
 				size,
 				mapping_info->len);
 		}
@@ -1272,7 +1299,7 @@
 			iommu_cb_set.cb_info[idx].handle);
 
 		if (rc)
-			pr_err("IOVA free failed\n");
+			CAM_ERR(CAM_SMMU, "IOVA free failed");
 
 	} else if (mapping_info->region_id == CAM_SMMU_REGION_IO) {
 		msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
@@ -1317,19 +1344,19 @@
 	int ret = 0;
 
 	if (!identifier) {
-		pr_err("Error: iommu hardware name is NULL\n");
+		CAM_ERR(CAM_SMMU, "Error: iommu hardware name is NULL");
 		return -EINVAL;
 	}
 
 	if (!handle_ptr) {
-		pr_err("Error: handle pointer is NULL\n");
+		CAM_ERR(CAM_SMMU, "Error: handle pointer is NULL");
 		return -EINVAL;
 	}
 
 	/* create and put handle in the table */
 	ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
 	if (ret < 0)
-		pr_err("Error: %s get handle fail\n", identifier);
+		CAM_ERR(CAM_SMMU, "Error: %s get handle fail", identifier);
 
 	return ret;
 }
@@ -1340,20 +1367,21 @@
 	int ret = 0, idx;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: Index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU, "Error: Index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return -EINVAL;
@@ -1371,7 +1399,7 @@
 	case CAM_SMMU_VOTE:
 	case CAM_SMMU_DEVOTE:
 	default:
-		pr_err("Error: idx = %d, ops = %d\n", idx, ops);
+		CAM_ERR(CAM_SMMU, "Error: idx = %d, ops = %d", idx, ops);
 		ret = -EINVAL;
 	}
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1396,10 +1424,10 @@
 	struct page *page;
 	struct sg_table *table = NULL;
 
-	CDBG("%s: nents = %lu, idx = %d, virt_len  = %zx\n",
-		__func__, nents, idx, virt_len);
-	CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %pK\n",
-		__func__, phys_len, iommu_dir, virt_addr);
+	CAM_DBG(CAM_SMMU, "nents = %lu, idx = %d, virt_len  = %zx",
+		nents, idx, virt_len);
+	CAM_DBG(CAM_SMMU, "phys_len = %zx, iommu_dir = %d, virt_addr = %pK",
+		phys_len, iommu_dir, virt_addr);
 
 	/*
 	 * This table will go inside the 'mapping' structure
@@ -1435,7 +1463,8 @@
 		virt_len, &iova);
 
 	if (rc < 0) {
-		pr_err("Could not find valid iova for scratch buffer");
+		CAM_ERR(CAM_SMMU,
+			"Could not find valid iova for scratch buffer");
 		goto err_iommu_map;
 	}
 
@@ -1444,7 +1473,7 @@
 		table->sgl,
 		table->nents,
 		iommu_dir) != virt_len) {
-		pr_err("iommu_map_sg() failed");
+		CAM_ERR(CAM_SMMU, "iommu_map_sg() failed");
 		goto err_iommu_map;
 	}
 
@@ -1466,22 +1495,23 @@
 	mapping_info->phys_len = phys_len;
 	mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
 
-	CDBG("%s: paddr = %pK, len = %zx, phys_len = %zx",
-		__func__, (void *)mapping_info->paddr,
+	CAM_DBG(CAM_SMMU, "paddr = %pK, len = %zx, phys_len = %zx",
+		(void *)mapping_info->paddr,
 		mapping_info->len, mapping_info->phys_len);
 
 	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
 
 	*virt_addr = (dma_addr_t)iova;
 
-	CDBG("%s: mapped virtual address = %lx\n", __func__,
+	CAM_DBG(CAM_SMMU, "mapped virtual address = %lx",
 		(unsigned long)*virt_addr);
 	return 0;
 
 err_mapping_info:
 	unmapped = iommu_unmap(domain, iova,  virt_len);
 	if (unmapped != virt_len)
-		pr_err("Unmapped only %zx instead of %zx", unmapped, virt_len);
+		CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
+			unmapped, virt_len);
 err_iommu_map:
 	__free_pages(page, get_order(phys_len));
 err_page_alloc:
@@ -1504,7 +1534,8 @@
 		&iommu_cb_set.cb_info[idx].scratch_map;
 
 	if (!mapping_info->table) {
-		pr_err("Error: Invalid params: dev = %pK, table = %pK",
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params: dev = %pK, table = %pK",
 			(void *)iommu_cb_set.cb_info[idx].dev,
 			(void *)mapping_info->table);
 		return -EINVAL;
@@ -1513,14 +1544,15 @@
 	/* Clean up the mapping_info struct from the list */
 	unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
 	if (unmapped != mapping_info->len)
-		pr_err("Unmapped only %zx instead of %zx",
+		CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
 			unmapped, mapping_info->len);
 
 	rc = cam_smmu_free_scratch_va(scratch_map,
 		mapping_info->paddr,
 		mapping_info->len);
 	if (rc < 0) {
-		pr_err("Error: Invalid iova while freeing scratch buffer\n");
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid iova while freeing scratch buffer");
 		rc = -EINVAL;
 	}
 
@@ -1546,67 +1578,74 @@
 	unsigned int iommu_dir;
 
 	if (!paddr_ptr || !virt_len || !phys_len) {
-		pr_err("Error: Input pointer or lengths invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input pointer or lengths invalid");
 		return -EINVAL;
 	}
 
 	if (virt_len < phys_len) {
-		pr_err("Error: virt_len > phys_len\n");
+		CAM_ERR(CAM_SMMU, "Error: virt_len > phys_len");
 		return -EINVAL;
 	}
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
 	if (iommu_dir == IOMMU_INVALID_DIR) {
-		pr_err("Error: translate direction failed. dir = %d\n", dir);
+		CAM_ERR(CAM_SMMU,
+			"Error: translate direction failed. dir = %d", dir);
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto error;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
-		pr_err("Error: Context bank does not support scratch bufs\n");
+		CAM_ERR(CAM_SMMU,
+			"Error: Context bank does not support scratch bufs");
 		rc = -EINVAL;
 		goto error;
 	}
 
-	CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n",
-		__func__, handle, idx, dir);
-	CDBG("%s: virt_len = %zx, phys_len  = %zx\n",
-		__func__, phys_len, virt_len);
+	CAM_DBG(CAM_SMMU, "smmu handle = %x, idx = %d, dir = %d",
+		handle, idx, dir);
+	CAM_DBG(CAM_SMMU, "virt_len = %zx, phys_len  = %zx",
+		phys_len, virt_len);
 
 	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
-		pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
-				iommu_cb_set.cb_info[idx].name);
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
 		rc = -EINVAL;
 		goto error;
 	}
 
 	if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
-		pr_err("Requested scratch buffer length not page aligned\n");
+		CAM_ERR(CAM_SMMU,
+			"Requested scratch buffer length not page aligned");
 		rc = -EINVAL;
 		goto error;
 	}
 
 	if (!IS_ALIGNED(virt_len, phys_len)) {
-		pr_err("Requested virt length not aligned with phys length\n");
+		CAM_ERR(CAM_SMMU,
+			"Requested virt length not aligned with phys length");
 		rc = -EINVAL;
 		goto error;
 	}
@@ -1617,7 +1656,7 @@
 		iommu_dir,
 		paddr_ptr);
 	if (rc < 0)
-		pr_err("Error: mapping or add list fail\n");
+		CAM_ERR(CAM_SMMU, "Error: mapping or add list fail");
 
 error:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1632,28 +1671,31 @@
 	struct cam_dma_buff_info *mapping_info;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	/* find index in the iommu_cb_set.cb_info */
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto handle_err;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
-		pr_err("Error: Context bank does not support scratch buffers\n");
+		CAM_ERR(CAM_SMMU,
+			"Error: Context bank does not support scratch buffers");
 		rc = -EINVAL;
 		goto handle_err;
 	}
@@ -1663,7 +1705,7 @@
 	 */
 	mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
 	if (!mapping_info) {
-		pr_err("Error: Invalid params\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid params");
 		rc = -ENODEV;
 		goto handle_err;
 	}
@@ -1671,7 +1713,7 @@
 	/* unmapping one buffer from device */
 	rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
 	if (rc < 0) {
-		pr_err("Error: unmap or remove list fail\n");
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
 		goto handle_err;
 	}
 
@@ -1698,12 +1740,12 @@
 	enum cam_smmu_buf_state buf_state;
 
 	if (!paddr_ptr || !len_ptr) {
-		pr_err("Input pointers are invalid\n");
+		CAM_ERR(CAM_SMMU, "Input pointers are invalid");
 		return -EINVAL;
 	}
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Invalid handle");
 		return -EINVAL;
 	}
 
@@ -1714,27 +1756,28 @@
 
 	dma_dir = cam_smmu_translate_dir(dir);
 	if (dma_dir == DMA_NONE) {
-		pr_err("translate direction failed. dir = %d\n", dir);
+		CAM_ERR(CAM_SMMU, "translate direction failed. dir = %d", dir);
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU, "handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto get_addr_end;
 	}
 
 	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
-		pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
 				iommu_cb_set.cb_info[idx].name);
 		rc = -EINVAL;
 		goto get_addr_end;
@@ -1743,15 +1786,16 @@
 	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
 		len_ptr);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
-		CDBG("ion_fd:%d already in the list, give same addr back",
-				 ion_fd);
+		CAM_ERR(CAM_SMMU,
+			"ion_fd:%d already in the list, give same addr back",
+			 ion_fd);
 		rc = -EALREADY;
 		goto get_addr_end;
 	}
 	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
 			paddr_ptr, len_ptr, region_id);
 	if (rc < 0)
-		pr_err("mapping or add list fail\n");
+		CAM_ERR(CAM_SMMU, "mapping or add list fail");
 
 get_addr_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1767,12 +1811,12 @@
 	enum cam_smmu_buf_state buf_state;
 
 	if (!paddr_ptr || !len_ptr) {
-		pr_err("Error: Input pointers are invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
 		return -EINVAL;
 	}
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
@@ -1782,14 +1826,16 @@
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto get_addr_end;
@@ -1797,7 +1843,7 @@
 
 	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
 	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
-		CDBG("ion_fd:%d not in the mapped list", ion_fd);
+		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
 		rc = -EINVAL;
 		goto get_addr_end;
 	}
@@ -1823,21 +1869,23 @@
 	struct cam_dma_buff_info *mapping_info;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	/* find index in the iommu_cb_set.cb_info */
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto unmap_end;
@@ -1846,17 +1894,17 @@
 	/* Based on ion fd and index, we can find mapping info of buffer */
 	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
 	if (!mapping_info) {
-		pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+		CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
 			idx, ion_fd);
 		rc = -EINVAL;
 		goto unmap_end;
 	}
 
 	/* Unmapping one buffer from device */
-	CDBG("SMMU: removing buffer idx = %d\n", idx);
+	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
 	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
 	if (rc < 0)
-		pr_err("Error: unmap or remove list fail\n");
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
 
 unmap_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1871,21 +1919,23 @@
 	struct cam_dma_buff_info *mapping_info;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	/* find index in the iommu_cb_set.cb_info */
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto put_addr_end;
@@ -1894,7 +1944,7 @@
 	/* based on ion fd and index, we can find mapping info of buffer */
 	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
 	if (!mapping_info) {
-		pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+		CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
 			idx, ion_fd);
 		rc = -EINVAL;
 		goto put_addr_end;
@@ -1911,27 +1961,29 @@
 	int idx;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return -EINVAL;
 	}
 
 	if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
-		pr_err("Client %s buffer list is not clean!\n",
+		CAM_ERR(CAM_SMMU, "Client %s buffer list is not clean",
 			iommu_cb_set.cb_info[idx].name);
 		cam_smmu_print_list(idx);
 		cam_smmu_clean_buffer_list(idx);
@@ -1981,7 +2033,7 @@
 	int rc = 0;
 
 	if (!cb || !dev) {
-		pr_err("Error: invalid input params\n");
+		CAM_ERR(CAM_SMMU, "Error: invalid input params");
 		return -EINVAL;
 	}
 
@@ -2001,12 +2053,13 @@
 			cb->shared_info.iova_len,
 			-1);
 
-		CDBG("Shared mem start->%lX\n",
+		CAM_DBG(CAM_SMMU, "Shared mem start->%lX",
 			(unsigned long)cb->shared_info.iova_start);
-		CDBG("Shared mem len->%zu\n", cb->shared_info.iova_len);
+		CAM_DBG(CAM_SMMU, "Shared mem len->%zu",
+			cb->shared_info.iova_len);
 
 		if (rc) {
-			pr_err("Genpool chunk creation failed\n");
+			CAM_ERR(CAM_SMMU, "Genpool chunk creation failed");
 			gen_pool_destroy(cb->shared_mem_pool);
 			cb->shared_mem_pool = NULL;
 			return rc;
@@ -2019,7 +2072,8 @@
 			cb->scratch_info.iova_len,
 			0);
 		if (rc < 0) {
-			pr_err("Error: failed to create scratch map\n");
+			CAM_ERR(CAM_SMMU,
+				"Error: failed to create scratch map");
 			rc = -ENODEV;
 			goto end;
 		}
@@ -2030,12 +2084,12 @@
 		cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
 			cb->io_info.iova_start, cb->io_info.iova_len);
 		if (IS_ERR(cb->mapping)) {
-			pr_err("Error: create mapping Failed\n");
+			CAM_ERR(CAM_SMMU, "Error: create mapping Failed");
 			rc = -ENODEV;
 			goto end;
 		}
 	} else {
-		pr_err("Context bank does not have IO region\n");
+		CAM_ERR(CAM_SMMU, "Context bank does not have IO region");
 		rc = -ENODEV;
 		goto end;
 	}
@@ -2060,7 +2114,7 @@
 	struct device_node *domains_child_node = NULL;
 
 	if (!dev) {
-		pr_err("Error: Invalid device\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid device");
 		return -ENODEV;
 	}
 
@@ -2078,7 +2132,7 @@
 	}
 
 	if (iommu_cb_set.cb_num == 0) {
-		pr_err("Error: no context banks present\n");
+		CAM_ERR(CAM_SMMU, "Error: no context banks present");
 		return -ENOENT;
 	}
 
@@ -2088,14 +2142,14 @@
 		GFP_KERNEL);
 
 	if (!iommu_cb_set.cb_info) {
-		pr_err("Error: cannot allocate context banks\n");
+		CAM_ERR(CAM_SMMU, "Error: cannot allocate context banks");
 		return -ENOMEM;
 	}
 
 	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
 	iommu_cb_set.cb_init_count = 0;
 
-	CDBG("no of context banks :%d\n", iommu_cb_set.cb_num);
+	CAM_DBG(CAM_SMMU, "no of context banks :%d", iommu_cb_set.cb_num);
 	return 0;
 }
 
@@ -2109,13 +2163,13 @@
 	int num_regions = 0;
 
 	if (!of_node || !cb) {
-		pr_err("Invalid argument(s)\n");
+		CAM_ERR(CAM_SMMU, "Invalid argument(s)");
 		return -EINVAL;
 	}
 
 	mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
 	if (!mem_map_node) {
-		pr_err("iova-mem-map not present\n");
+		CAM_ERR(CAM_SMMU, "iova-mem-map not present");
 		return -EINVAL;
 	}
 
@@ -2129,7 +2183,7 @@
 			"iova-region-name", &region_name);
 		if (rc < 0) {
 			of_node_put(mem_map_node);
-			pr_err("IOVA region not found\n");
+			CAM_ERR(CAM_SMMU, "IOVA region not found");
 			return -EINVAL;
 		}
 
@@ -2137,7 +2191,7 @@
 			"iova-region-start", &region_start);
 		if (rc < 0) {
 			of_node_put(mem_map_node);
-			pr_err("Failed to read iova-region-start\n");
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-start");
 			return -EINVAL;
 		}
 
@@ -2145,7 +2199,7 @@
 			"iova-region-len", &region_len);
 		if (rc < 0) {
 			of_node_put(mem_map_node);
-			pr_err("Failed to read iova-region-len\n");
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-len");
 			return -EINVAL;
 		}
 
@@ -2153,7 +2207,7 @@
 			"iova-region-id", &region_id);
 		if (rc < 0) {
 			of_node_put(mem_map_node);
-			pr_err("Failed to read iova-region-id\n");
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-id");
 			return -EINVAL;
 		}
 
@@ -2179,20 +2233,22 @@
 			cb->io_info.iova_len = region_len;
 			break;
 		default:
-			pr_err("Incorrect region id present in DT file: %d\n",
+			CAM_ERR(CAM_SMMU,
+				"Incorrect region id present in DT file: %d",
 				region_id);
 		}
 
-		CDBG("Found label -> %s\n", cb->name);
-		CDBG("Found region -> %s\n", region_name);
-		CDBG("region_start -> %X\n", region_start);
-		CDBG("region_len -> %X\n", region_len);
-		CDBG("region_id -> %X\n", region_id);
+		CAM_DBG(CAM_SMMU, "Found label -> %s", cb->name);
+		CAM_DBG(CAM_SMMU, "Found region -> %s", region_name);
+		CAM_DBG(CAM_SMMU, "region_start -> %X", region_start);
+		CAM_DBG(CAM_SMMU, "region_len -> %X", region_len);
+		CAM_DBG(CAM_SMMU, "region_id -> %X", region_id);
 	}
 	of_node_put(mem_map_node);
 
 	if (!num_regions) {
-		pr_err("No memory regions found, at least one needed\n");
+		CAM_ERR(CAM_SMMU,
+			"No memory regions found, at least one needed");
 		rc = -ENODEV;
 	}
 
@@ -2207,13 +2263,13 @@
 	struct device *ctx = NULL;
 
 	if (!dev) {
-		pr_err("Error: Invalid device\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid device");
 		return -ENODEV;
 	}
 
 	/* check the bounds */
 	if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
-		pr_err("Error: populate more than allocated cb\n");
+		CAM_ERR(CAM_SMMU, "Error: populate more than allocated cb");
 		rc = -EBADHANDLE;
 		goto cb_init_fail;
 	}
@@ -2224,29 +2280,31 @@
 	/* set the name of the context bank */
 	rc = of_property_read_string(dev->of_node, "label", &cb->name);
 	if (rc < 0) {
-		pr_err("Error: failed to read label from sub device\n");
+		CAM_ERR(CAM_SMMU,
+			"Error: failed to read label from sub device");
 		goto cb_init_fail;
 	}
 
 	rc = cam_smmu_get_memory_regions_info(dev->of_node,
 		cb);
 	if (rc < 0) {
-		pr_err("Error: Getting region info\n");
+		CAM_ERR(CAM_SMMU, "Error: Getting region info");
 		return rc;
 	}
 
 	/* set up the iommu mapping for the  context bank */
 	if (type == CAM_QSMMU) {
-		pr_err("Error: QSMMU ctx not supported for : %s\n", cb->name);
+		CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for : %s",
+			cb->name);
 		return -ENODEV;
 	}
 
 	ctx = dev;
-	CDBG("getting Arm SMMU ctx : %s\n", cb->name);
+	CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name);
 
 	rc = cam_smmu_setup_cb(cb, ctx);
 	if (rc < 0) {
-		pr_err("Error: failed to setup cb : %s\n", cb->name);
+		CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", cb->name);
 		goto cb_init_fail;
 	}
 
@@ -2258,7 +2316,7 @@
 	/* increment count to next bank */
 	iommu_cb_set.cb_init_count++;
 
-	CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count);
+	CAM_DBG(CAM_SMMU, "X: cb init count :%d", iommu_cb_set.cb_init_count);
 
 cb_init_fail:
 	return rc;
@@ -2272,14 +2330,14 @@
 	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
 		rc = cam_alloc_smmu_context_banks(dev);
 		if (rc < 0) {
-			pr_err("Error: allocating context banks\n");
+			CAM_ERR(CAM_SMMU, "Error: allocating context banks");
 			return -ENOMEM;
 		}
 	}
 	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
 		rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
 		if (rc < 0) {
-			pr_err("Error: populating context banks\n");
+			CAM_ERR(CAM_SMMU, "Error: populating context banks");
 			return -ENOMEM;
 		}
 		return rc;
@@ -2287,7 +2345,7 @@
 	if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
 		rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
 		if (rc < 0) {
-			pr_err("Error: populating context banks\n");
+			CAM_ERR(CAM_SMMU, "Error: populating context banks");
 			return -ENOMEM;
 		}
 		return rc;
@@ -2304,7 +2362,7 @@
 	rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
 				NULL, &pdev->dev);
 	if (rc < 0) {
-		pr_err("Error: populating devices\n");
+		CAM_ERR(CAM_SMMU, "Error: populating devices");
 	} else {
 		INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
 		mutex_init(&iommu_cb_set.payload_list_lock);
diff --git a/drivers/media/platform/msm/camera/cam_sync/Makefile b/drivers/media/platform/msm/camera/cam_sync/Makefile
index e3012cb..8e884ca 100644
--- a/drivers/media/platform/msm/camera/cam_sync/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sync/Makefile
@@ -1 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 96f40e1..644cb63 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-SYNC %s:%d " fmt, __func__, __LINE__
-
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/irqflags.h>
@@ -19,6 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/debugfs.h>
 #include "cam_sync_util.h"
+#include "cam_debug_util.h"
 
 struct sync_device *sync_dev;
 
@@ -35,7 +34,8 @@
 
 	rc = cam_sync_init_object(sync_dev->sync_table, idx, name);
 	if (rc) {
-		pr_err("Error: Unable to init row at idx = %ld\n", idx);
+		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+			idx);
 		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 		return -EINVAL;
 	}
@@ -62,7 +62,8 @@
 	row = sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj %d",
 			sync_obj);
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		return -EINVAL;
@@ -124,7 +125,8 @@
 	row = sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		return -EINVAL;
@@ -157,12 +159,14 @@
 	/* Objects to be signaled will be added into this list */
 	INIT_LIST_HEAD(&sync_list);
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
+		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj");
 		return -EINVAL;
-
+	}
 	row = sync_dev->sync_table + sync_obj;
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		return -EINVAL;
 	}
@@ -170,14 +174,15 @@
 	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
 	if (row->type == CAM_SYNC_TYPE_GROUP) {
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		pr_err("Error: Signaling a GROUP sync object = %d\n",
+		CAM_ERR(CAM_SYNC, "Error: Signaling a GROUP sync object = %d",
 			sync_obj);
 		return -EINVAL;
 	}
 
 	if (row->state != CAM_SYNC_STATE_ACTIVE) {
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		pr_err("Error: Sync object already signaled sync_obj = %d",
+		CAM_ERR(CAM_SYNC,
+			"Error: Sync object already signaled sync_obj = %d",
 			sync_obj);
 		return -EALREADY;
 	}
@@ -185,7 +190,8 @@
 	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
 		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		pr_err("Error: signaling with undefined status = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: signaling with undefined status = %d",
 			status);
 		return -EINVAL;
 	}
@@ -297,18 +303,20 @@
 	rc = cam_sync_util_validate_merge(sync_obj,
 		num_objs);
 	if (rc < 0) {
-		pr_err("Validation failed, Merge not allowed");
+		CAM_ERR(CAM_SYNC, "Validation failed, Merge not allowed");
 		return -EINVAL;
 	}
 
 	rc = cam_sync_util_find_and_set_empty_row(sync_dev, &idx);
 	if (rc < 0) {
-		pr_err("Error: Unable to find empty row, table full");
+		CAM_ERR(CAM_SYNC,
+			"Error: Unable to find empty row, table full");
 		return -EINVAL;
 	}
 
 	if (idx <= 0 || idx >= CAM_SYNC_MAX_OBJS) {
-		pr_err("Error: Invalid empty row index returned = %ld", idx);
+		CAM_ERR(CAM_SYNC,
+			"Error: Invalid empty row index returned = %ld", idx);
 		return -EINVAL;
 	}
 
@@ -317,7 +325,8 @@
 		num_objs);
 
 	if (rc < 0) {
-		pr_err("Error: Unable to init row at idx = %ld\n", idx);
+		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+			idx);
 		return -EINVAL;
 	}
 
@@ -335,7 +344,8 @@
 
 	row = sync_dev->sync_table + sync_obj;
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj: idx = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj: idx = %d",
 			sync_obj);
 		return -EINVAL;
 	}
@@ -356,7 +366,8 @@
 	row = sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		return -EINVAL;
 	}
@@ -365,15 +376,16 @@
 		msecs_to_jiffies(timeout_ms));
 
 	if (!timeleft) {
-		pr_err("Error: cam_sync_wait() timed out for sync obj = %d\n",
-			sync_obj);
+		CAM_ERR(CAM_SYNC,
+			"Error: timed out for sync obj = %d", sync_obj);
 		rc = -ETIMEDOUT;
 	} else {
 		switch (row->state) {
 		case CAM_SYNC_STATE_INVALID:
 		case CAM_SYNC_STATE_ACTIVE:
 		case CAM_SYNC_STATE_SIGNALED_ERROR:
-			pr_err("Error: Wait on invalid state = %d, obj = %d\n",
+			CAM_ERR(CAM_SYNC,
+				"Error: Wait on invalid state = %d, obj = %d",
 				row->state, sync_obj);
 			rc = -EINVAL;
 			break;
@@ -566,7 +578,8 @@
 	row =  sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		kfree(user_payload_kernel);
@@ -616,12 +629,12 @@
 	struct sync_table_row *row = NULL;
 
 	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
-		CDBG("Incorrect ioctl size\n");
+		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
 		return -EINVAL;
 	}
 
 	if (!k_ioctl->ioctl_ptr) {
-		CDBG("Invalid embedded ioctl ptr\n");
+		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
 		return -EINVAL;
 	}
 
@@ -638,7 +651,8 @@
 	row =  sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		return -EINVAL;
@@ -667,7 +681,7 @@
 	struct cam_private_ioctl_arg k_ioctl;
 
 	if (!sync_dev) {
-		pr_err("%s sync_dev NULL\n", __func__);
+		CAM_ERR(CAM_SYNC, "sync_dev NULL");
 		return -EINVAL;
 	}
 
@@ -735,7 +749,7 @@
 	struct sync_device *sync_dev = video_drvdata(filep);
 
 	if (!sync_dev) {
-		pr_err("%s Sync device NULL\n", __func__);
+		CAM_ERR(CAM_SYNC, "Sync device NULL");
 		return -ENODEV;
 	}
 
@@ -752,7 +766,7 @@
 		sync_dev->cam_sync_eventq = filep->private_data;
 		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
 	} else {
-		pr_err("v4l2_fh_open failed : %d\n", rc);
+		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
 	}
 	mutex_unlock(&sync_dev->table_lock);
 
@@ -766,7 +780,7 @@
 	struct sync_device *sync_dev = video_drvdata(filep);
 
 	if (!sync_dev) {
-		pr_err("%s Sync device NULL\n", __func__);
+		CAM_ERR(CAM_SYNC, "Sync device NULL");
 		rc = -ENODEV;
 		return rc;
 	}
@@ -784,11 +798,13 @@
 			 */
 			rc = cam_sync_signal(i, CAM_SYNC_STATE_SIGNALED_ERROR);
 			if (rc < 0)
-				pr_err("Cleanup signal failed: idx = %d\n", i);
+				CAM_ERR(CAM_SYNC,
+					"Cleanup signal failed: idx = %d", i);
 
 			rc = cam_sync_destroy(i);
 			if (rc < 0)
-				pr_err("Cleanup destroy failed: idx = %d\n", i);
+				CAM_ERR(CAM_SYNC,
+					"Cleanup destroy failed: idx = %d", i);
 		}
 	}
 	mutex_unlock(&sync_dev->table_lock);
@@ -951,7 +967,8 @@
 		WQ_HIGHPRI | WQ_UNBOUND, 0);
 
 	if (!sync_dev->work_queue) {
-		pr_err("Error: high priority work queue creation failed!\n");
+		CAM_ERR(CAM_SYNC,
+			"Error: high priority work queue creation failed");
 		rc = -ENOMEM;
 		goto v4l2_fail;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index 3b3cbff..c62aacf 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-SYNC-UTIL %s:%d " fmt, __func__, __LINE__
-
 #include "cam_sync_util.h"
 
 int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
@@ -85,7 +83,8 @@
 			active_count++;
 			break;
 		default:
-			pr_err("Invalid state of child object during merge\n");
+			CAM_ERR(CAM_SYNC,
+				"Invalid state of child object during merge");
 			return CAM_SYNC_STATE_SIGNALED_ERROR;
 		}
 	}
@@ -256,7 +255,7 @@
 	struct sync_table_row *row = NULL;
 
 	if (num_objs <= 1) {
-		pr_err("Single object merge is not allowed\n");
+		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
 		return -EINVAL;
 	}
 
@@ -265,7 +264,8 @@
 		spin_lock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
 		if (row->type == CAM_SYNC_TYPE_GROUP ||
 			row->state == CAM_SYNC_STATE_INVALID) {
-			pr_err("Group obj %d can't be merged or obj UNINIT\n",
+			CAM_ERR(CAM_SYNC,
+				"Group obj %d can't be merged or obj UNINIT",
 				sync_obj[i]);
 			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
 			return -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
index 9dedd14..8b60ce1 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -16,6 +16,7 @@
 
 #include <cam_sync_api.h>
 #include "cam_sync_private.h"
+#include "cam_debug_util.h"
 
 extern struct sync_device *sync_dev;
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
index 78cd9d8..ec08c3c 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -10,22 +10,18 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/err.h>
 #include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 int cam_io_w(uint32_t data, void __iomem *addr)
 {
 	if (!addr)
 		return -EINVAL;
 
-	CDBG("0x%pK %08x\n", addr, data);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 	writel_relaxed(data, addr);
 
 	return 0;
@@ -36,7 +32,7 @@
 	if (!addr)
 		return -EINVAL;
 
-	CDBG("0x%pK %08x\n", addr, data);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 	/* Ensure previous writes are done */
 	wmb();
 	writel_relaxed(data, addr);
@@ -49,12 +45,12 @@
 	uint32_t data;
 
 	if (!addr) {
-		pr_err("Invalid args\n");
+		CAM_ERR(CAM_UTIL, "Invalid args");
 		return 0;
 	}
 
 	data = readl_relaxed(addr);
-	CDBG("0x%pK %08x\n", addr, data);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 
 	return data;
 }
@@ -64,14 +60,14 @@
 	uint32_t data;
 
 	if (!addr) {
-		pr_err("Invalid args\n");
+		CAM_ERR(CAM_UTIL, "Invalid args");
 		return 0;
 	}
 
 	/* Ensure previous read is done */
 	rmb();
 	data = readl_relaxed(addr);
-	CDBG("0x%pK %08x\n", addr, data);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 
 	return data;
 }
@@ -86,10 +82,10 @@
 	if (!dest_addr || !src_addr)
 		return -EINVAL;
 
-	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+	CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
 
 	for (i = 0; i < len/4; i++) {
-		CDBG("0x%pK %08x\n", d, *s);
+		CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
 		writel_relaxed(*s++, d++);
 	}
 
@@ -106,7 +102,7 @@
 	if (!dest_addr || !src_addr)
 		return -EINVAL;
 
-	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+	CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
 
 	/*
 	 * Do not use cam_io_w_mb to avoid double wmb() after a write
@@ -114,7 +110,7 @@
 	 */
 	wmb();
 	for (i = 0; i < (len / 4); i++) {
-		CDBG("0x%pK %08x\n", d, *s);
+		CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
 		writel_relaxed(*s++, d++);
 	}
 
@@ -138,7 +134,7 @@
 	}
 
 	if (cnt > retry) {
-		pr_debug("Poll failed by value\n");
+		CAM_DBG(CAM_UTIL, "Poll failed by value");
 		rc = -EINVAL;
 	}
 
@@ -163,7 +159,7 @@
 	}
 
 	if (cnt > retry) {
-		pr_debug("Poll failed with mask\n");
+		CAM_DBG(CAM_UTIL, "Poll failed with mask");
 		rc = -EINVAL;
 	}
 
@@ -179,7 +175,7 @@
 		return -EINVAL;
 
 	for (i = 0; i < len; i++) {
-		CDBG("i= %d len =%d val=%x addr =%pK\n",
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
 			i, len, data[i], addr);
 		writel_relaxed(data[i], addr);
 	}
@@ -196,7 +192,7 @@
 		return -EINVAL;
 
 	for (i = 0; i < len; i++) {
-		CDBG("i= %d len =%d val=%x addr =%pK\n",
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
 			i, len, data[i], addr);
 		/* Ensure previous writes are done */
 		wmb();
@@ -217,7 +213,7 @@
 		return -EINVAL;
 
 	for (i = 0; i < len; i++) {
-		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
 			i, len, __VAL(i), addr_base, __OFFSET(i));
 		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
 	}
@@ -236,7 +232,7 @@
 	/* Ensure write is done */
 	wmb();
 	for (i = 0; i < len; i++) {
-		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
 			i, len, __VAL(i), addr_base, __OFFSET(i));
 		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
 	}
@@ -254,7 +250,8 @@
 	int           i;
 	uint32_t      data;
 
-	CDBG("addr=%pK offset=0x%x size=%d\n", base_addr, start_offset, size);
+	CAM_DBG(CAM_UTIL, "addr=%pK offset=0x%x size=%d",
+		base_addr, start_offset, size);
 
 	if (!base_addr || (size <= 0))
 		return -EINVAL;
@@ -271,13 +268,13 @@
 		snprintf(p_str, 9, "%08x ", data);
 		p_str += 9;
 		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
-			pr_err("%s\n", line_str);
+			CAM_ERR(CAM_UTIL, "%s", line_str);
 			line_str[0] = '\0';
 			p_str = line_str;
 		}
 	}
 	if (line_str[0] != '\0')
-		pr_err("%s\n", line_str);
+		CAM_ERR(CAM_UTIL, "%s", line_str);
 
 	return 0;
 }
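The hunks above (and the related cam_smmu/cam_sync changes earlier in this patch) convert the camera drivers' pr_err()/pr_debug()/CDBG() calls to the CAM_ERR()/CAM_WARN()/CAM_DBG() wrappers declared in cam_debug_util.h, passing a module tag (CAM_SMMU, CAM_SYNC, CAM_UTIL, ...) as the first argument; the call sites also drop the trailing "\n", presumably because the wrapper appends it. cam_debug_util.h itself is not part of this section, so the following is only a rough, hypothetical sketch of what such tagged logging macros commonly look like — the macro names, tags and usage are taken from the diff, everything else is an assumption:

	/* Hypothetical sketch only -- not the actual cam_debug_util.h contents. */
	#include <linux/printk.h>

	#define CAM_ERR(module, fmt, args...) \
		pr_err("CAM_ERR: %s: %s: %d " fmt "\n", \
			#module, __func__, __LINE__, ##args)

	#define CAM_WARN(module, fmt, args...) \
		pr_warn("CAM_WARN: %s: %s: %d " fmt "\n", \
			#module, __func__, __LINE__, ##args)

	#define CAM_DBG(module, fmt, args...) \
		pr_debug("CAM_DBG: %s: %s: %d " fmt "\n", \
			#module, __func__, __LINE__, ##args)

	/* Usage, matching the converted call sites above: */
	/*   CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data); */
	/*   CAM_ERR(CAM_UTIL, "Invalid args");           */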
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 442d0bd..a1cdfe9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/types.h>
 #include <linux/slab.h>
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 1d86bb1..1990230 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -10,17 +10,13 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 #include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 int cam_soc_util_get_level_from_string(const char *string,
 	enum cam_vote_level *level)
@@ -43,7 +39,7 @@
 	} else if (!strcmp(string, "turbo")) {
 		*level = CAM_TURBO_VOTE;
 	} else {
-		pr_err("Invalid string %s\n", string);
+		CAM_ERR(CAM_UTIL, "Invalid string %s", string);
 		return -EINVAL;
 	}
 
@@ -68,7 +64,8 @@
 	enum cam_vote_level *apply_level)
 {
 	if (req_level >= CAM_MAX_VOTE) {
-		pr_err("Invalid clock level parameter %d\n", req_level);
+		CAM_ERR(CAM_UTIL, "Invalid clock level parameter %d",
+			req_level);
 		return -EINVAL;
 	}
 
@@ -84,13 +81,15 @@
 			}
 
 		if (i == CAM_MAX_VOTE) {
-			pr_err("No valid clock level found to apply, req=%d\n",
+			CAM_ERR(CAM_UTIL,
+				"No valid clock level found to apply, req=%d",
 				req_level);
 			return -EINVAL;
 		}
 	}
 
-	CDBG("Req level %d, Applying %d\n", req_level, *apply_level);
+	CAM_DBG(CAM_UTIL, "Req level %d, Applying %d",
+		req_level, *apply_level);
 
 	return 0;
 }
@@ -98,12 +97,12 @@
 int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
 {
 	if (!soc_info) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_UTIL, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	if (!soc_info->irq_line) {
-		pr_err("No IRQ line available\n");
+		CAM_ERR(CAM_UTIL, "No IRQ line available");
 		return -ENODEV;
 	}
 
@@ -115,12 +114,12 @@
 int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
 {
 	if (!soc_info) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_UTIL, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	if (!soc_info->irq_line) {
-		pr_err("No IRQ line available\n");
+		CAM_ERR(CAM_UTIL, "No IRQ line available");
 		return -ENODEV;
 	}
 
@@ -138,33 +137,34 @@
 	if (!clk || !clk_name)
 		return -EINVAL;
 
-	CDBG("set %s, rate %d\n", clk_name, clk_rate);
+	CAM_DBG(CAM_UTIL, "set %s, rate %d", clk_name, clk_rate);
 	if (clk_rate > 0) {
 		clk_rate_round = clk_round_rate(clk, clk_rate);
-		CDBG("new_rate %ld\n", clk_rate_round);
+		CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round);
 		if (clk_rate_round < 0) {
-			pr_err("round failed for clock %s rc = %ld\n",
+			CAM_ERR(CAM_UTIL, "round failed for clock %s rc = %ld",
 				clk_name, clk_rate_round);
 			return clk_rate_round;
 		}
 		rc = clk_set_rate(clk, clk_rate_round);
 		if (rc) {
-			pr_err("set_rate failed on %s\n", clk_name);
+			CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
 			return rc;
 		}
 	} else if (clk_rate == INIT_RATE) {
 		clk_rate_round = clk_get_rate(clk);
-		CDBG("init new_rate %ld\n", clk_rate_round);
+		CAM_DBG(CAM_UTIL, "init new_rate %ld", clk_rate_round);
 		if (clk_rate_round == 0) {
 			clk_rate_round = clk_round_rate(clk, 0);
 			if (clk_rate_round <= 0) {
-				pr_err("round rate failed on %s\n", clk_name);
+				CAM_ERR(CAM_UTIL, "round rate failed on %s",
+					clk_name);
 				return clk_rate_round;
 			}
 		}
 		rc = clk_set_rate(clk, clk_rate_round);
 		if (rc) {
-			pr_err("set_rate failed on %s\n", clk_name);
+			CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
 			return rc;
 		}
 	}
@@ -186,7 +186,7 @@
 
 	rc = clk_prepare_enable(clk);
 	if (rc) {
-		pr_err("enable failed for %s: rc(%d)\n", clk_name, rc);
+		CAM_ERR(CAM_UTIL, "enable failed for %s: rc(%d)", clk_name, rc);
 		return rc;
 	}
 
@@ -198,7 +198,7 @@
 	if (!clk || !clk_name)
 		return -EINVAL;
 
-	CDBG("disable %s\n", clk_name);
+	CAM_DBG(CAM_UTIL, "disable %s", clk_name);
 	clk_disable_unprepare(clk);
 
 	return 0;
@@ -223,7 +223,8 @@
 
 	if ((soc_info->num_clk == 0) ||
 		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
-		pr_err("Invalid number of clock %d\n", soc_info->num_clk);
+		CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+			soc_info->num_clk);
 		return -EINVAL;
 	}
 
@@ -304,14 +305,14 @@
 
 	count = of_property_count_strings(of_node, "clock-names");
 
-	CDBG("count = %d\n", count);
+	CAM_DBG(CAM_UTIL, "count = %d", count);
 	if (count > CAM_SOC_MAX_CLK) {
-		pr_err("invalid count of clocks, count=%d", count);
+		CAM_ERR(CAM_UTIL, "invalid count of clocks, count=%d", count);
 		rc = -EINVAL;
 		return rc;
 	}
 	if (count <= 0) {
-		CDBG("No clock-names found\n");
+		CAM_DBG(CAM_UTIL, "No clock-names found");
 		count = 0;
 		soc_info->num_clk = count;
 		return 0;
@@ -321,9 +322,11 @@
 	for (i = 0; i < count; i++) {
 		rc = of_property_read_string_index(of_node, "clock-names",
 				i, &(soc_info->clk_name[i]));
-		CDBG("clock-names[%d] = %s\n", i, soc_info->clk_name[i]);
+		CAM_DBG(CAM_UTIL, "clock-names[%d] = %s",
+			i, soc_info->clk_name[i]);
 		if (rc) {
-			pr_err("i= %d count= %d reading clock-names failed\n",
+			CAM_ERR(CAM_UTIL,
+				"i= %d count= %d reading clock-names failed",
 				i, count);
 			return rc;
 		}
@@ -331,12 +334,13 @@
 
 	num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates");
 	if (num_clk_rates <= 0) {
-		pr_err("reading clock-rates count failed\n");
+		CAM_ERR(CAM_UTIL, "reading clock-rates count failed");
 		return -EINVAL;
 	}
 
 	if ((num_clk_rates % soc_info->num_clk) != 0) {
-		pr_err("mismatch clk/rates, No of clocks=%d, No of rates=%d\n",
+		CAM_ERR(CAM_UTIL,
+			"mismatch clk/rates, No of clocks=%d, No of rates=%d",
 			soc_info->num_clk, num_clk_rates);
 		return -EINVAL;
 	}
@@ -346,7 +350,8 @@
 	num_clk_level_strings = of_property_count_strings(of_node,
 		"clock-cntl-level");
 	if (num_clk_level_strings != num_clk_levels) {
-		pr_err("Mismatch No of levels=%d, No of level string=%d\n",
+		CAM_ERR(CAM_UTIL,
+			"Mismatch No of levels=%d, No of level string=%d",
 			num_clk_levels, num_clk_level_strings);
 		return -EINVAL;
 	}
@@ -355,7 +360,8 @@
 		rc = of_property_read_string_index(of_node,
 			"clock-cntl-level", i, &clk_cntl_lvl_string);
 		if (rc) {
-			pr_err("Error reading clock-cntl-level, rc=%d\n", rc);
+			CAM_ERR(CAM_UTIL,
+				"Error reading clock-cntl-level, rc=%d", rc);
 			return rc;
 		}
 
@@ -364,14 +370,16 @@
 		if (rc)
 			return rc;
 
-		CDBG("[%d] : %s %d\n", i, clk_cntl_lvl_string, level);
+		CAM_DBG(CAM_UTIL,
+			"[%d] : %s %d", i, clk_cntl_lvl_string, level);
 		soc_info->clk_level_valid[level] = true;
 		for (j = 0; j < soc_info->num_clk; j++) {
 			rc = of_property_read_u32_index(of_node, "clock-rates",
 				((i * soc_info->num_clk) + j),
 				&soc_info->clk_rate[level][j]);
 			if (rc) {
-				pr_err("Error reading clock-rates, rc=%d\n",
+				CAM_ERR(CAM_UTIL,
+					"Error reading clock-rates, rc=%d",
 					rc);
 				return rc;
 			}
@@ -381,7 +389,8 @@
 				(long)NO_SET_RATE :
 				soc_info->clk_rate[level][j];
 
-			CDBG("soc_info->clk_rate[%d][%d] = %d\n", level, j,
+			CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d",
+				level, j,
 				soc_info->clk_rate[level][j]);
 		}
 	}
@@ -390,7 +399,7 @@
 	rc = of_property_read_string_index(of_node, "src-clock-name", 0,
 		&src_clk_str);
 	if (rc || !src_clk_str) {
-		CDBG("No src_clk_str found\n");
+		CAM_DBG(CAM_UTIL, "No src_clk_str found");
 		rc = 0;
 		/* Bottom loop is dependent on src_clk_str. So return here */
 		return rc;
@@ -399,7 +408,8 @@
 	for (i = 0; i < soc_info->num_clk; i++) {
 		if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) {
 			soc_info->src_clk_idx = i;
-			CDBG("src clock = %s, index = %d\n", src_clk_str, i);
+			CAM_DBG(CAM_UTIL, "src clock = %s, index = %d",
+				src_clk_str, i);
 			break;
 		}
 	}
@@ -415,7 +425,8 @@
 
 	if ((soc_info->num_clk == 0) ||
 		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
-		pr_err("Invalid number of clock %d\n", soc_info->num_clk);
+		CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+			soc_info->num_clk);
 		return -EINVAL;
 	}
 
@@ -448,7 +459,7 @@
 
 	count /= sizeof(uint32_t);
 	if (!count) {
-		pr_err("gpio-req-tbl-num 0\n");
+		CAM_ERR(CAM_UTIL, "gpio-req-tbl-num 0");
 		return 0;
 	}
 
@@ -467,30 +478,32 @@
 	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num",
 		val_array, count);
 	if (rc) {
-		pr_err("failed in reading gpio-req-tbl-num, rc = %d\n", rc);
+		CAM_ERR(CAM_UTIL, "failed in reading gpio-req-tbl-num, rc = %d",
+			rc);
 		goto free_gpio_req_tbl;
 	}
 
 	for (i = 0; i < count; i++) {
 		if (val_array[i] >= gpio_array_size) {
-			pr_err("gpio req tbl index %d invalid\n", val_array[i]);
+			CAM_ERR(CAM_UTIL, "gpio req tbl index %d invalid",
+				val_array[i]);
 			goto free_gpio_req_tbl;
 		}
 		gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
-		CDBG("cam_gpio_req_tbl[%d].gpio = %d\n", i,
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].gpio = %d", i,
 			gconf->cam_gpio_req_tbl[i].gpio);
 	}
 
 	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags",
 		val_array, count);
 	if (rc) {
-		pr_err("Failed in gpio-req-tbl-flags, rc %d\n", rc);
+		CAM_ERR(CAM_UTIL, "Failed in gpio-req-tbl-flags, rc %d", rc);
 		goto free_gpio_req_tbl;
 	}
 
 	for (i = 0; i < count; i++) {
 		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
-		CDBG("cam_gpio_req_tbl[%d].flags = %ld\n", i,
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].flags = %ld", i,
 			gconf->cam_gpio_req_tbl[i].flags);
 	}
 
@@ -499,10 +512,10 @@
 			"gpio-req-tbl-label", i,
 			&gconf->cam_gpio_req_tbl[i].label);
 		if (rc) {
-			pr_err("Failed rc %d\n", rc);
+			CAM_ERR(CAM_UTIL, "Failed rc %d", rc);
 			goto free_gpio_req_tbl;
 		}
-		CDBG("cam_gpio_req_tbl[%d].label = %s\n", i,
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].label = %s", i,
 			gconf->cam_gpio_req_tbl[i].label);
 	}
 
@@ -536,7 +549,7 @@
 
 	/* Validate input parameters */
 	if (!of_node) {
-		pr_err("Invalid param of_node\n");
+		CAM_ERR(CAM_UTIL, "Invalid param of_node");
 		return -EINVAL;
 	}
 
@@ -545,7 +558,7 @@
 	if (gpio_array_size <= 0)
 		return 0;
 
-	CDBG("gpio count %d\n", gpio_array_size);
+	CAM_DBG(CAM_UTIL, "gpio count %d", gpio_array_size);
 
 	gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
 	if (!gpio_array)
@@ -553,7 +566,7 @@
 
 	for (i = 0; i < gpio_array_size; i++) {
 		gpio_array[i] = of_get_gpio(of_node, i);
-		CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
+		CAM_DBG(CAM_UTIL, "gpio_array[%d] = %d", i, gpio_array[i]);
 	}
 
 	gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
@@ -563,7 +576,7 @@
 	rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
 		gpio_array_size);
 	if (rc) {
-		pr_err("failed in msm_camera_get_dt_gpio_req_tbl\n");
+		CAM_ERR(CAM_UTIL, "failed in msm_camera_get_dt_gpio_req_tbl");
 		goto free_gpio_array;
 	}
 
@@ -603,23 +616,23 @@
 
 
 	if (!gpio_conf) {
-		CDBG("No GPIO entry\n");
+		CAM_DBG(CAM_UTIL, "No GPIO entry");
 		return 0;
 	}
 	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
-		pr_err("GPIO table size is invalid\n");
+		CAM_ERR(CAM_UTIL, "GPIO table size is invalid");
 		return -EINVAL;
 	}
 	size = gpio_conf->cam_gpio_req_tbl_size;
 	gpio_tbl = gpio_conf->cam_gpio_req_tbl;
 
 	if (!gpio_tbl || !size) {
-		pr_err("Invalid gpio_tbl %pK / size %d\n",
+		CAM_ERR(CAM_UTIL, "Invalid gpio_tbl %pK / size %d",
 			gpio_tbl, size);
 		return -EINVAL;
 	}
 	for (i = 0; i < size; i++) {
-		CDBG("i=%d, gpio=%d dir=%ld\n", i,
+		CAM_DBG(CAM_UTIL, "i=%d, gpio=%d dir=%ld", i,
 			gpio_tbl[i].gpio, gpio_tbl[i].flags);
 	}
 	if (gpio_en) {
@@ -632,7 +645,7 @@
 				 * apply new gpios, outout a error message
 				 * for driver bringup debug
 				 */
-				pr_err("gpio %d:%s request fails\n",
+				CAM_ERR(CAM_UTIL, "gpio %d:%s request fails",
 					gpio_tbl[i].gpio, gpio_tbl[i].label);
 			}
 		}
@@ -651,7 +664,7 @@
 	struct platform_device *pdev = NULL;
 
 	if (!soc_info || !soc_info->pdev) {
-		pr_err("Invalid parameters\n");
+		CAM_ERR(CAM_UTIL, "Invalid parameters");
 		return -EINVAL;
 	}
 
@@ -662,7 +675,7 @@
 	count = of_property_count_strings(of_node, "regulator-names");
 	if (count != -EINVAL) {
 		if (count <= 0) {
-			pr_err("no regulators found\n");
+			CAM_ERR(CAM_UTIL, "no regulators found");
 			count = 0;
 			return -EINVAL;
 		}
@@ -670,22 +683,23 @@
 		soc_info->num_rgltr = count;
 
 	} else {
-		CDBG("No regulators node found\n");
+		CAM_DBG(CAM_UTIL, "No regulators node found");
 		return 0;
 	}
 
 	for (i = 0; i < soc_info->num_rgltr; i++) {
 		rc = of_property_read_string_index(of_node,
 			"regulator-names", i, &soc_info->rgltr_name[i]);
-		CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
+		CAM_DBG(CAM_UTIL, "rgltr_name[%d] = %s",
+			i, soc_info->rgltr_name[i]);
 		if (rc) {
-			pr_err("no regulator resource at cnt=%d\n", i);
+			CAM_ERR(CAM_UTIL, "no regulator resource at cnt=%d", i);
 			return -ENODEV;
 		}
 	}
 
 	if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
-		CDBG("No regulator control parameter defined\n");
+		CAM_DBG(CAM_UTIL, "No regulator control parameter defined");
 		soc_info->rgltr_ctrl_support = false;
 		return 0;
 	}
@@ -695,21 +709,21 @@
 	rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
 		soc_info->rgltr_min_volt, soc_info->num_rgltr);
 	if (rc) {
-		pr_err("No minimum volatage value found, rc=%d\n", rc);
+		CAM_ERR(CAM_UTIL, "No minimum volatage value found, rc=%d", rc);
 		return -EINVAL;
 	}
 
 	rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
 		soc_info->rgltr_max_volt, soc_info->num_rgltr);
 	if (rc) {
-		pr_err("No maximum volatage value found, rc=%d\n", rc);
+		CAM_ERR(CAM_UTIL, "No maximum volatage value found, rc=%d", rc);
 		return -EINVAL;
 	}
 
 	rc = of_property_read_u32_array(of_node, "rgltr-load-current",
 		soc_info->rgltr_op_mode, soc_info->num_rgltr);
 	if (rc) {
-		pr_err("No Load curent found rc=%d\n", rc);
+		CAM_ERR(CAM_UTIL, "No Load curent found rc=%d", rc);
 		return -EINVAL;
 	}
 
@@ -730,12 +744,13 @@
 
 	rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
 	if (rc) {
-		pr_err("device %s failed to read cell-index\n", pdev->name);
+		CAM_ERR(CAM_UTIL, "device %s failed to read cell-index",
+			pdev->name);
 		return rc;
 	}
 	count = of_property_count_strings(of_node, "reg-names");
 	if (count <= 0) {
-		pr_err("no reg-names found\n");
+		CAM_ERR(CAM_UTIL, "no reg-names found");
 		count = 0;
 	}
 	soc_info->num_mem_block = count;
@@ -744,7 +759,7 @@
 		rc = of_property_read_string_index(of_node, "reg-names", i,
 			&soc_info->mem_block_name[i]);
 		if (rc) {
-			pr_err("failed to read reg-names at %d\n", i);
+			CAM_ERR(CAM_UTIL, "failed to read reg-names at %d", i);
 			return rc;
 		}
 		soc_info->mem_block[i] =
@@ -752,7 +767,7 @@
 			soc_info->mem_block_name[i]);
 
 		if (!soc_info->mem_block[i]) {
-			pr_err("no mem resource by name %s\n",
+			CAM_ERR(CAM_UTIL, "no mem resource by name %s",
 				soc_info->mem_block_name[i]);
 			rc = -ENODEV;
 			return rc;
@@ -763,7 +778,7 @@
 		rc = of_property_read_u32_array(of_node, "reg-cam-base",
 			soc_info->mem_block_cam_base, soc_info->num_mem_block);
 		if (rc) {
-			pr_err("Error reading register offsets\n");
+			CAM_ERR(CAM_UTIL, "Error reading register offsets");
 			return rc;
 		}
 	}
@@ -771,13 +786,13 @@
 	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
 		&soc_info->irq_name);
 	if (rc) {
-		pr_warn("No interrupt line present\n");
+		CAM_WARN(CAM_UTIL, "No interrupt line present");
 		rc = 0;
 	} else {
 		soc_info->irq_line = platform_get_resource_byname(pdev,
 			IORESOURCE_IRQ, soc_info->irq_name);
 		if (!soc_info->irq_line) {
-			pr_err("no irq resource\n");
+			CAM_ERR(CAM_UTIL, "no irq resource");
 			rc = -ENODEV;
 			return rc;
 		}
@@ -817,7 +832,7 @@
 	if (IS_ERR_OR_NULL(*reg)) {
 		rc = PTR_ERR(*reg);
 		rc = rc ? rc : -EINVAL;
-		pr_err("Regulator %s get failed %d\n", rgltr_name, rc);
+		CAM_ERR(CAM_UTIL, "Regulator %s get failed %d", rgltr_name, rc);
 		*reg = NULL;
 	}
 	return rc;
@@ -831,13 +846,13 @@
 	int32_t rc = 0;
 
 	if (!rgltr) {
-		pr_err("Invalid NULL parameter\n");
+		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
 		return -EINVAL;
 	}
 
 	rc = regulator_disable(rgltr);
 	if (rc) {
-		pr_err("%s regulator disable failed\n", rgltr_name);
+		CAM_ERR(CAM_UTIL, "%s regulator disable failed", rgltr_name);
 		return rc;
 	}
 
@@ -864,31 +879,32 @@
 	int32_t rc = 0;
 
 	if (!rgltr) {
-		pr_err("Invalid NULL parameter\n");
+		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
 		return -EINVAL;
 	}
 
 	if (regulator_count_voltages(rgltr) > 0) {
-		CDBG("voltage min=%d, max=%d\n",
+		CAM_DBG(CAM_UTIL, "voltage min=%d, max=%d",
 			rgltr_min_volt, rgltr_max_volt);
 
 		rc = regulator_set_voltage(
 			rgltr, rgltr_min_volt, rgltr_max_volt);
 		if (rc) {
-			pr_err("%s set voltage failed\n", rgltr_name);
+			CAM_ERR(CAM_UTIL, "%s set voltage failed", rgltr_name);
 			return rc;
 		}
 
 		rc = regulator_set_load(rgltr, rgltr_op_mode);
 		if (rc) {
-			pr_err("%s set optimum mode failed\n", rgltr_name);
+			CAM_ERR(CAM_UTIL, "%s set optimum mode failed",
+				rgltr_name);
 			return rc;
 		}
 	}
 
 	rc = regulator_enable(rgltr);
 	if (rc) {
-		pr_err("%s regulator_enable failed\n", rgltr_name);
+		CAM_ERR(CAM_UTIL, "%s regulator_enable failed", rgltr_name);
 		return rc;
 	}
 
@@ -909,7 +925,7 @@
 
 	device_pctrl->pinctrl = devm_pinctrl_get(dev);
 	if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
-		CDBG("Pinctrl not available\n");
+		CAM_DBG(CAM_UTIL, "Pinctrl not available");
 		device_pctrl->pinctrl = NULL;
 		return 0;
 	}
@@ -917,7 +933,8 @@
 		pinctrl_lookup_state(device_pctrl->pinctrl,
 				CAM_SOC_PINCTRL_STATE_DEFAULT);
 	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_active)) {
-		pr_err("Failed to get the active state pinctrl handle\n");
+		CAM_ERR(CAM_UTIL,
+			"Failed to get the active state pinctrl handle");
 		device_pctrl->gpio_state_active = NULL;
 		return -EINVAL;
 	}
@@ -925,7 +942,8 @@
 		= pinctrl_lookup_state(device_pctrl->pinctrl,
 				CAM_SOC_PINCTRL_STATE_SLEEP);
 	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_suspend)) {
-		pr_err("Failed to get the suspend state pinctrl handle\n");
+		CAM_ERR(CAM_UTIL,
+			"Failed to get the suspend state pinctrl handle");
 		device_pctrl->gpio_state_suspend = NULL;
 		return -EINVAL;
 	}
@@ -973,7 +991,8 @@
 		}
 
 		if (rc) {
-			pr_err("%s enable failed\n", soc_info->rgltr_name[j]);
+			CAM_ERR(CAM_UTIL, "%s enable failed",
+				soc_info->rgltr_name[j]);
 			goto disable_rgltr;
 		}
 	}
@@ -1007,7 +1026,7 @@
 
 
 	if (!soc_info || !soc_info->pdev) {
-		pr_err("Invalid parameters\n");
+		CAM_ERR(CAM_UTIL, "Invalid parameters");
 		return -EINVAL;
 	}
 
@@ -1018,7 +1037,8 @@
 			if (!request_mem_region(soc_info->mem_block[i]->start,
 				resource_size(soc_info->mem_block[i]),
 				soc_info->mem_block_name[i])){
-				pr_err("Error Mem Region request Failed:%s\n",
+				CAM_ERR(CAM_UTIL,
+					"Error Mem Region request Failed:%s",
 					soc_info->mem_block_name[i]);
 				rc = -ENOMEM;
 				goto unmap_base;
@@ -1028,7 +1048,7 @@
 			soc_info->mem_block[i]->start,
 			resource_size(soc_info->mem_block[i]));
 		if (!soc_info->reg_map[i].mem_base) {
-			pr_err("i= %d base NULL\n", i);
+			CAM_ERR(CAM_UTIL, "i= %d base NULL", i);
 			rc = -ENOMEM;
 			goto unmap_base;
 		}
@@ -1041,7 +1061,7 @@
 
 	for (i = 0; i < soc_info->num_rgltr; i++) {
 		if (soc_info->rgltr_name[i] == NULL) {
-			pr_err("can't find regulator name\n");
+			CAM_ERR(CAM_UTIL, "can't find regulator name");
 			goto put_regulator;
 		}
 
@@ -1056,7 +1076,7 @@
 			handler, IRQF_TRIGGER_RISING,
 			soc_info->irq_name, irq_data);
 		if (rc) {
-			pr_err("irq request fail\n");
+			CAM_ERR(CAM_UTIL, "irq request fail");
 			rc = -EBUSY;
 			goto put_regulator;
 		}
@@ -1069,7 +1089,8 @@
 		soc_info->clk[i] = clk_get(&soc_info->pdev->dev,
 			soc_info->clk_name[i]);
 		if (!soc_info->clk[i]) {
-			pr_err("get failed for %s\n", soc_info->clk_name[i]);
+			CAM_ERR(CAM_UTIL, "get failed for %s",
+				soc_info->clk_name[i]);
 			rc = -ENOENT;
 			goto put_clk;
 		}
@@ -1077,11 +1098,11 @@
 
 	rc = cam_soc_util_request_pinctrl(soc_info);
 	if (rc)
-		CDBG("Failed in request pinctrl, rc=%d\n", rc);
+		CAM_DBG(CAM_UTIL, "Failed in request pinctrl, rc=%d", rc);
 
 	rc = cam_soc_util_request_gpio_table(soc_info, true);
 	if (rc) {
-		pr_err("Failed in request gpio table, rc=%d\n", rc);
+		CAM_ERR(CAM_UTIL, "Failed in request gpio table, rc=%d", rc);
 		goto put_clk;
 	}
 
@@ -1135,7 +1156,7 @@
 	struct platform_device *pdev = NULL;
 
 	if (!soc_info || !soc_info->pdev) {
-		pr_err("Invalid parameter\n");
+		CAM_ERR(CAM_UTIL, "Invalid parameter");
 		return -EINVAL;
 	}
 
@@ -1186,7 +1207,7 @@
 
 	rc = cam_soc_util_regulator_enable_default(soc_info);
 	if (rc) {
-		pr_err("Regulators enable failed\n");
+		CAM_ERR(CAM_UTIL, "Regulators enable failed");
 		return rc;
 	}
 
@@ -1257,7 +1278,7 @@
 {
 	void __iomem     *base_addr = NULL;
 
-	CDBG("base_idx %u size=%d\n", base_index, size);
+	CAM_DBG(CAM_UTIL, "base_idx %u size=%d", base_index, size);
 
 	if (!soc_info || base_index >= soc_info->num_reg_map ||
 		size <= 0 || (offset + size) >=
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 9c6bf42..6b09a54 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -229,7 +229,7 @@
 		vote_data[i].work_mode = inst->clk_data.work_mode;
 		fill_recon_stats(inst, &vote_data[i]);
 
-		if (core->resources.sys_cache_enabled)
+		if (core->resources.sys_cache_res_set)
 			vote_data[i].use_sys_cache = true;
 
 		i++;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 755f0c86..8888980 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -161,7 +161,7 @@
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
 	bool sys_cache_present;
-	bool sys_cache_enabled;
+	bool sys_cache_res_set;
 	struct subcache_set subcache_set;
 	struct reg_set reg_set;
 	struct addr_set qdss_addr_set;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index b430d14..15246d3 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -99,9 +99,10 @@
 static void __unload_fw(struct venus_hfi_device *device);
 static int __tzbsp_set_video_state(enum tzbsp_video_state state);
 static int __enable_subcaches(struct venus_hfi_device *device);
+static int __set_subcaches(struct venus_hfi_device *device);
+static int __release_subcaches(struct venus_hfi_device *device);
 static int __disable_subcaches(struct venus_hfi_device *device);
 
-
 /**
  * Utility function to enforce some of our assumptions.  Spam calls to this
  * in hotspots in code to double check some of the assumptions that we hold.
@@ -1747,10 +1748,8 @@
 		dprintk(VIDC_WARN, "Failed to send image version pkt to f/w\n");
 
 	rc = __enable_subcaches(device);
-	if (rc) {
-		dprintk(VIDC_WARN,
-			"Failed to enable subcaches, err = %d\n", rc);
-	}
+	if (!rc)
+		__set_subcaches(device);
 
 	if (dev->res->pm_qos_latency_us) {
 #ifdef CONFIG_SMP
@@ -2806,7 +2805,7 @@
 
 	rc = __suspend(device);
 	if (rc)
-		dprintk(VIDC_ERR, "Failed venus power off\n");
+		dprintk(VIDC_ERR, "Failed __suspend\n");
 
 	/* Cancel pending delayed works if any */
 	cancel_delayed_work(&venus_hfi_pm_work);
@@ -3763,39 +3762,64 @@
 	int rc = 0;
 	u32 c = 0;
 	struct subcache_info *sinfo;
-	u32 resource[VIDC_MAX_SUBCACHE_SIZE];
-	struct hfi_resource_syscache_info_type *sc_res_info;
-	struct hfi_resource_subcache_type *sc_res;
-	struct vidc_resource_hdr rhdr;
 
 	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
-	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
-
-	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
-	sc_res = &(sc_res_info->rg_subcache_entries[0]);
-
 	/* Activate subcaches */
 	venus_hfi_for_each_subcache(device, sinfo) {
 		rc = llcc_slice_activate(sinfo->subcache);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to activate %s: %d\n",
 				sinfo->name, rc);
-			continue;
+			goto err_activate_fail;
 		}
 		sinfo->isactive = true;
-
-		/* Update the entry */
-		sc_res[c].size = sinfo->subcache->llcc_slice_size;
-		sc_res[c].sc_id = sinfo->subcache->llcc_slice_id;
-		dprintk(VIDC_DBG, "Activate subcache %s\n", sinfo->name);
+		dprintk(VIDC_DBG, "Activated subcache %s\n", sinfo->name);
 		c++;
 	}
 
+	dprintk(VIDC_DBG, "Activated %d Subcaches to Venus\n", c);
+
+	return 0;
+
+err_activate_fail:
+	__release_subcaches(device);
+	__disable_subcaches(device);
+	return -EINVAL;
+}
+
+static int __set_subcaches(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+	u32 resource[VIDC_MAX_SUBCACHE_SIZE];
+	struct hfi_resource_syscache_info_type *sc_res_info;
+	struct hfi_resource_subcache_type *sc_res;
+	struct vidc_resource_hdr rhdr;
+
+	if (device->res->sys_cache_res_set) {
+		dprintk(VIDC_DBG, "Subcaches already set to Venus\n");
+		return 0;
+	}
+
+	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	venus_hfi_for_each_subcache(device, sinfo) {
+		if (sinfo->isactive == true) {
+			sc_res[c].size = sinfo->subcache->llcc_slice_size;
+			sc_res[c].sc_id = sinfo->subcache->llcc_slice_id;
+			c++;
+		}
+	}
+
 	/* Set resource to Venus for activated subcaches */
 	if (c) {
-		dprintk(VIDC_DBG, "Setting Subcaches\n");
+		dprintk(VIDC_DBG, "Setting %d Subcaches\n", c);
 
 		rhdr.resource_handle = sc_res_info; /* cookie */
 		rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
@@ -3814,9 +3838,8 @@
 			sinfo->isset = true;
 	}
 
-	dprintk(VIDC_DBG, "Activated & Set Subcaches to Venus\n");
-
-	device->res->sys_cache_enabled = true;
+	dprintk(VIDC_DBG, "Set Subcaches done to Venus\n");
+	device->res->sys_cache_res_set = true;
 
 	return 0;
 
@@ -3826,7 +3849,7 @@
 	return rc;
 }
 
-static int __disable_subcaches(struct venus_hfi_device *device)
+static int __release_subcaches(struct venus_hfi_device *device)
 {
 	struct subcache_info *sinfo;
 	int rc = 0;
@@ -3839,8 +3862,6 @@
 	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
-	dprintk(VIDC_DBG, "Disabling Subcaches\n");
-
 	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
 
 	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
@@ -3858,16 +3879,29 @@
 	}
 
 	if (c > 0) {
+		dprintk(VIDC_DBG, "Releasing %d subcaches\n", c);
 		rhdr.resource_handle = sc_res_info; /* cookie */
 		rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
 
 		rc = __core_release_resource(device, &rhdr);
 		if (rc)
-			dprintk(VIDC_ERR, "Failed to release subcaches\n");
-
-		dprintk(VIDC_DBG, "Release %d subcaches\n", c);
+			dprintk(VIDC_ERR,
+				"Failed to release %d subcaches\n", c);
 	}
 
+	device->res->sys_cache_res_set = false;
+
+	return rc;
+}
+
+static int __disable_subcaches(struct venus_hfi_device *device)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
+		return 0;
+
 	/* De-activate subcaches */
 	venus_hfi_for_each_subcache_reverse(device, sinfo) {
 		if (sinfo->isactive == true) {
@@ -3883,8 +3917,6 @@
 		}
 	}
 
-	device->res->sys_cache_enabled = false;
-
 	return rc;
 }
 
@@ -3984,10 +4016,7 @@
 		return 0;
 	}
 
-	dprintk(VIDC_PROF, "Entering power collapse\n");
-
-	if (__disable_subcaches(device))
-		dprintk(VIDC_ERR, "Failed to disable subcaches\n");
+	dprintk(VIDC_PROF, "Entering suspend\n");
 
 	if (device->res->pm_qos_latency_us &&
 		pm_qos_request_active(&device->qos))
@@ -3999,8 +4028,10 @@
 		goto err_tzbsp_suspend;
 	}
 
+	__disable_subcaches(device);
+
 	__venus_power_off(device);
-	dprintk(VIDC_PROF, "Venus power collapsed\n");
+	dprintk(VIDC_PROF, "Venus power off\n");
 	return rc;
 
 err_tzbsp_suspend:
@@ -4061,10 +4092,8 @@
 	__sys_set_debug(device, msm_vidc_fw_debug);
 
 	rc = __enable_subcaches(device);
-	if (rc) {
-		dprintk(VIDC_WARN,
-			"Failed to enable subcaches, err = %d\n", rc);
-	}
+	if (!rc)
+		__set_subcaches(device);
 
 	dprintk(VIDC_PROF, "Resumed from power collapse\n");
 exit:
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c6f3496..120fd54 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -4589,6 +4589,10 @@
 
 	dev_set_drvdata(&card->dev, md);
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
 	if (mmc_add_disk(md))
 		goto out;
 
@@ -4632,6 +4636,9 @@
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 0);
+#endif
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0bf89b4..978dd9a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -4472,7 +4472,7 @@
 
 	BUG_ON(host->card);
 
-	mmc_register_extcon(host);
+	mmc_unregister_extcon(host);
 
 	mmc_claim_host(host);
 	mmc_power_off(host);
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 9f417bb..4a9232e 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2582,6 +2582,21 @@
 EXPORT_SYMBOL(ipa_stop_gsi_channel);
 
 /**
+ * ipa_start_gsi_channel() - Starts a GSI channel in IPA
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa_start_gsi_channel(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_start_gsi_channel, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_start_gsi_channel);
+
+/**
  * ipa_get_version_string() - Get string representation of IPA version
  * @ver: IPA version
  *
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 133e058..20471eb 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -324,6 +324,8 @@
 
 	int (*ipa_stop_gsi_channel)(u32 clnt_hdl);
 
+	int (*ipa_start_gsi_channel)(u32 clnt_hdl);
+
 	struct iommu_domain *(*ipa_get_smmu_domain)(void);
 
 	int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size,
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index 79da63e..a623d0b 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -114,6 +114,7 @@
  * @send_dl_skb: client callback for sending skb in downlink direction
  * @stats: statistics, how many packets were transmitted using the SW bridge
  * @is_conencted: is bridge connected ?
+ * @is_suspended: is bridge suspended ?
  * @mode: ODU mode (router/bridge)
  * @lock: for the initialization, connect and disconnect synchronization
  * @llv6_addr: link local IPv6 address of ODU network interface
@@ -122,6 +123,8 @@
  * @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe
  * @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe
  * @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe
+ * @rm_comp: completion object for IPA RM
+ * @wakeup_request: client callback to wakeup
  */
 struct odu_bridge_ctx {
 	struct class *class;
@@ -135,6 +138,7 @@
 	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
 	struct stats stats;
 	bool is_connected;
+	bool is_suspended;
 	enum odu_bridge_mode mode;
 	struct mutex lock;
 	struct in6_addr llv6_addr;
@@ -146,6 +150,8 @@
 	u32 ipa_sys_desc_size;
 	void *logbuf;
 	void *logbuf_low;
+	struct completion rm_comp;
+	void (*wakeup_request)(void *);
 };
 static struct odu_bridge_ctx *odu_bridge_ctx;
 
@@ -1246,6 +1252,288 @@
 }
 EXPORT_SYMBOL(odu_bridge_cleanup);
 
+/* IPA Bridge implementation */
+#ifdef CONFIG_IPA3
+
+static void ipa_br_rm_notify(void *user_data, enum ipa_rm_event event,
+	unsigned long data)
+{
+	if (event == IPA_RM_RESOURCE_GRANTED)
+		complete(&odu_bridge_ctx->rm_comp);
+}
+
+static int ipa_br_request_prod(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	reinit_completion(&odu_bridge_ctx->rm_comp);
+	ODU_BRIDGE_DBG("requesting odu prod\n");
+	res = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (res) {
+		if (res != -EINPROGRESS) {
+			ODU_BRIDGE_ERR("failed to request prod %d\n", res);
+			return res;
+		}
+		wait_for_completion(&odu_bridge_ctx->rm_comp);
+	}
+
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+
+}
+
+static int ipa_br_release_prod(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	reinit_completion(&odu_bridge_ctx->rm_comp);
+	ODU_BRIDGE_DBG("requesting odu prod\n");
+	res = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (res) {
+		ODU_BRIDGE_ERR("failed to release prod %d\n", res);
+		return res;
+	}
+
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+
+}
+
+static int ipa_br_cons_request(void)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+	if (odu_bridge_ctx->is_suspended)
+		odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv);
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_br_cons_release(void)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+}
+
+/* IPA Bridge API is the new API which will replace the old odu_bridge API */
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+{
+	int ret;
+	struct ipa_rm_create_params create_params;
+
+	if (!params || !params->wakeup_request || !hdl) {
+		ODU_BRIDGE_ERR("NULL arg\n");
+		return -EINVAL;
+	}
+
+
+	ret = odu_bridge_init(&params->info);
+	if (ret)
+		return ret;
+
+	odu_bridge_ctx->wakeup_request = params->wakeup_request;
+
+	/* create IPA RM resources for power management */
+	init_completion(&odu_bridge_ctx->rm_comp);
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+	create_params.reg_params.user_data = odu_bridge_ctx;
+	create_params.reg_params.notify_cb = ipa_br_rm_notify;
+	create_params.floor_voltage = IPA_VOLTAGE_SVS;
+	ret = ipa_rm_create_resource(&create_params);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to create RM prod %d\n", ret);
+		goto fail_rm_prod;
+	}
+
+	ret = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to add ODU->APPS dependency %d\n", ret);
+		goto fail_add_dep;
+	}
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	create_params.request_resource = ipa_br_cons_request;
+	create_params.release_resource = ipa_br_cons_release;
+	create_params.floor_voltage = IPA_VOLTAGE_SVS;
+	ret = ipa_rm_create_resource(&create_params);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to create RM cons %d\n", ret);
+		goto fail_rm_cons;
+	}
+
+	/* handle is ignored for now */
+	*hdl = 0;
+
+	return 0;
+
+fail_rm_cons:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+fail_add_dep:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+fail_rm_prod:
+	odu_bridge_cleanup();
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_init);
+
+int ipa_bridge_connect(u32 hdl)
+{
+	int ret;
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("already connected\n");
+		return -EFAULT;
+	}
+
+	ret = ipa_br_request_prod();
+	if (ret)
+		return ret;
+
+	return odu_bridge_connect();
+}
+EXPORT_SYMBOL(ipa_bridge_connect);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+	struct ipa_rm_perf_profile profile = {0};
+	int ret;
+
+	profile.max_supported_bandwidth_mbps = bandwidth;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD, &profile);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to set perf profile to prod %d\n", ret);
+		return ret;
+	}
+
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_CONS, &profile);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to set perf profile to cons %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
+
+int ipa_bridge_disconnect(u32 hdl)
+{
+	int ret;
+
+	ret = odu_bridge_disconnect();
+	if (ret)
+		return ret;
+
+	ret = ipa_br_release_prod();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_disconnect);
+
+int ipa_bridge_suspend(u32 hdl)
+{
+	int ret;
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("bridge is  disconnected\n");
+		return -EFAULT;
+	}
+
+	if (odu_bridge_ctx->is_suspended) {
+		ODU_BRIDGE_ERR("bridge is already suspended\n");
+		return -EFAULT;
+	}
+
+	/* stop cons channel to prevent downlink data during suspend */
+	ret = ipa_stop_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to stop CONS channel %d\n", ret);
+		return ret;
+	}
+
+	ret = ipa_br_release_prod();
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to release prod %d\n", ret);
+		ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+		return ret;
+	}
+	odu_bridge_ctx->is_suspended = true;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_suspend);
+
+int ipa_bridge_resume(u32 hdl)
+{
+	int ret;
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("bridge is  disconnected\n");
+		return -EFAULT;
+	}
+
+	if (!odu_bridge_ctx->is_suspended) {
+		ODU_BRIDGE_ERR("bridge is not suspended\n");
+		return -EFAULT;
+	}
+
+	ret = ipa_br_request_prod();
+	if (ret)
+		return ret;
+
+	ret = ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to start CONS channel %d\n", ret);
+		return ret;
+	}
+	odu_bridge_ctx->is_suspended = false;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_resume);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata)
+{
+	return odu_bridge_tx_dp(skb, metadata);
+}
+EXPORT_SYMBOL(ipa_bridge_tx_dp);
+
+int ipa_bridge_cleanup(u32 hdl)
+{
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+	return odu_bridge_cleanup();
+}
+EXPORT_SYMBOL(ipa_bridge_cleanup);
+
+#endif /* CONFIG_IPA3 */
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ODU bridge driver");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 07bca0c..32c8b25 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -384,5 +384,6 @@
 			      void *user_data);
 void ipa_ntn_uc_dereg_rdyCB(void);
 const char *ipa_get_version_string(enum ipa_hw_type ver);
+int ipa_start_gsi_channel(u32 clnt_hdl);
 
 #endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index a85addb..141bff1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -1742,9 +1742,6 @@
 	iowrite32(val, base + offset);
 }
 
-int ipa_bridge_init(void);
-void ipa_bridge_cleanup(void);
-
 ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
 		 loff_t *f_pos);
 int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index cd4e016..9ebe111 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -266,7 +266,9 @@
 	int cnt = 0;
 	int start_idx;
 	int end_idx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
 	start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
 	end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
@@ -277,6 +279,8 @@
 				.log_buffer[i]);
 		cnt += nbytes;
 	}
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 
 	return cnt;
 }
@@ -286,7 +290,9 @@
 	int i;
 	struct ipa3_active_client_htable_entry *iterator;
 	int cnt = 0;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
 	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
 	hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
 			iterator, list) {
@@ -319,6 +325,8 @@
 	cnt += scnprintf(buf + cnt, size - cnt,
 			"\nTotal active clients count: %d\n",
 			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 
 	return cnt;
 }
@@ -368,6 +376,7 @@
 {
 	int i;
 
+	spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
 	ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
 			sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
@@ -399,20 +408,28 @@
 
 void ipa3_active_clients_log_clear(void)
 {
-	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
 	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
 	ipa3_ctx->ipa3_active_clients_logging.log_tail =
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
-	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 }
 
 static void ipa3_active_clients_log_destroy(void)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
 	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
 	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
 	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
 	ipa3_ctx->ipa3_active_clients_logging.log_tail =
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 }
 
 enum ipa_smmu_cb_type {
@@ -3402,7 +3419,10 @@
 	struct ipa3_active_client_htable_entry *hfound;
 	u32 hkey;
 	char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+	unsigned long flags;
 
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
+	int_ctx = true;
 	hfound = NULL;
 	memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
 	strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
@@ -3422,6 +3442,9 @@
 				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
 		if (hentry == NULL) {
 			IPAERR("failed allocating active clients hash entry");
+			spin_unlock_irqrestore(
+				&ipa3_ctx->ipa3_active_clients_logging.lock,
+				flags);
 			return;
 		}
 		hentry->type = id->type;
@@ -3446,6 +3469,8 @@
 				id->id_string, id->file, id->line);
 		ipa3_active_clients_log_insert(temp_str);
 	}
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 }
 
 void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index ed31423..58702e9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -187,6 +187,7 @@
 };
 
 struct ipa3_active_clients_log_ctx {
+	spinlock_t lock;
 	char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
 	int log_head;
 	int log_tail;
@@ -1887,10 +1888,6 @@
 				void *private_data,
 				void *interrupt_data);
 
-
-int ipa_bridge_init(void);
-void ipa_bridge_cleanup(void);
-
 ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
 		 loff_t *f_pos);
 int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 9ca4b7d..1a99808 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -4455,6 +4455,7 @@
 	api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
 	api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
 	api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
+	api_ctrl->ipa_start_gsi_channel = ipa3_start_gsi_channel;
 	api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
 	api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
 	api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 62e0978..92321ad 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -3095,84 +3095,6 @@
 	return 0;
 }
 
-static ssize_t
-usb_bam_show_inactivity_timer(struct device *dev, struct device_attribute *attr,
-		    char *buf)
-{
-	char *buff = buf;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(bam_enable_strings); i++) {
-		buff += snprintf(buff, PAGE_SIZE, "%s: %dms\n",
-					bam_enable_strings[i],
-					msm_usb_bam[i].inactivity_timer_ms);
-	}
-
-	return buff - buf;
-}
-
-static ssize_t usb_bam_store_inactivity_timer(struct device *dev,
-				     struct device_attribute *attr,
-				     const char *buff, size_t count)
-{
-	char buf[USB_BAM_MAX_STR_LEN];
-	char *trimmed_buf, *bam_str, *bam_name, *timer;
-	int timer_d;
-	int bam, ret;
-
-	if (strnstr(buff, "help", USB_BAM_MAX_STR_LEN)) {
-		pr_info("Usage: <bam_name> <ms>,<bam_name> <ms>,...\n");
-		pr_info("\tbam_name: [%s, %s, %s]\n",
-			bam_enable_strings[DWC3_CTRL],
-			bam_enable_strings[CI_CTRL],
-			bam_enable_strings[HSIC_CTRL]);
-		pr_info("\tms: time in ms. Use 0 to disable timer\n");
-		return count;
-	}
-
-	strlcpy(buf, buff, sizeof(buf));
-	trimmed_buf = strim(buf);
-
-	while (trimmed_buf) {
-		bam_str = strsep(&trimmed_buf, ",");
-		if (bam_str) {
-			bam_name = strsep(&bam_str, " ");
-			bam = get_bam_type_from_core_name(bam_name);
-			if (bam < 0 || bam >= MAX_BAMS) {
-				log_event_err("%s: Invalid bam, type=%d ,name=%s\n",
-					__func__, bam, bam_name);
-				return -EINVAL;
-			}
-
-			timer = strsep(&bam_str, " ");
-
-			if (!timer)
-				continue;
-
-			ret = kstrtoint(timer, 0, &timer_d);
-			if (ret) {
-				log_event_err("%s: err:%d with value:(%d)\n",
-						__func__, ret, timer_d);
-				return ret;
-			}
-
-			/* Apply new timer setting if bam has running pipes */
-			if (msm_usb_bam[bam].inactivity_timer_ms != timer_d) {
-				msm_usb_bam[bam].inactivity_timer_ms = timer_d;
-				if (msm_usb_bam[bam].pipes_enabled_per_bam > 0
-						&& !info[bam].in_lpm)
-					usb_bam_set_inactivity_timer(bam);
-			}
-		}
-	}
-
-	return count;
-}
-
-static DEVICE_ATTR(inactivity_timer, 0600,
-		   usb_bam_show_inactivity_timer,
-		   usb_bam_store_inactivity_timer);
-
 static int usb_bam_panic_notifier(struct notifier_block *this,
 		unsigned long event, void *ptr)
 {
@@ -3221,12 +3143,6 @@
 
 	dev_dbg(&pdev->dev, "usb_bam_probe\n");
 
-	ret = device_create_file(&pdev->dev, &dev_attr_inactivity_timer);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to create fs node\n");
-		return ret;
-	}
-
 	io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!io_res) {
 		dev_err(&pdev->dev, "missing BAM memory resource\n");
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index bc19b24..6cc83ab 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -2,6 +2,6 @@
 obj-$(CONFIG_SMB135X_CHARGER)   += smb135x-charger.o pmic-voter.o
 obj-$(CONFIG_SMB1355_SLAVE_CHARGER)   += smb1355-charger.o pmic-voter.o
 obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o
-obj-$(CONFIG_QPNP_SMB2)		+= qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_QPNP_SMB2)		+= step-chg-jeita.o battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
 obj-$(CONFIG_SMB138X_CHARGER)	+= smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
 obj-$(CONFIG_QPNP_QNOVO)	+= qpnp-qnovo.o battery.o
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 42a16d6..d522926 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1611,7 +1611,7 @@
 static int fg_charge_full_update(struct fg_chip *chip)
 {
 	union power_supply_propval prop = {0, };
-	int rc, msoc, bsoc, recharge_soc;
+	int rc, msoc, bsoc, recharge_soc, msoc_raw;
 	u8 full_soc[2] = {0xFF, 0xFF};
 
 	if (!chip->dt.hold_soc_while_full)
@@ -1647,6 +1647,7 @@
 		pr_err("Error in getting msoc, rc=%d\n", rc);
 		goto out;
 	}
+	msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
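+	/* convert the percentage msoc to raw SOC units for the recharge check */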
 
 	fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
 		msoc, bsoc, chip->health, chip->charge_status,
@@ -1670,7 +1671,7 @@
 			fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
 				msoc);
 		}
-	} else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+	} else if (msoc_raw < recharge_soc && chip->charge_full) {
 		chip->delta_soc = FULL_CAPACITY - msoc;
 
 		/*
@@ -1700,8 +1701,8 @@
 				rc);
 			goto out;
 		}
-		fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
-			bsoc >> 8, recharge_soc, chip->delta_soc);
+		fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n",
+			msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc);
 	} else {
 		goto out;
 	}
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index e94873c..5605c8a 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -19,6 +19,7 @@
 #include <linux/power_supply.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/log2.h>
 #include <linux/qpnp/qpnp-revid.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/of_regulator.h>
@@ -122,87 +123,6 @@
 		.max_u	= 1575000,
 		.step_u	= 25000,
 	},
-	.step_soc_threshold[0]		= {
-		.name	= "step charge soc threshold 1",
-		.reg	= STEP_CHG_SOC_OR_BATT_V_TH1_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-	},
-	.step_soc_threshold[1]		= {
-		.name	= "step charge soc threshold 2",
-		.reg	= STEP_CHG_SOC_OR_BATT_V_TH2_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-	},
-	.step_soc_threshold[2]         = {
-		.name	= "step charge soc threshold 3",
-		.reg	= STEP_CHG_SOC_OR_BATT_V_TH3_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-	},
-	.step_soc_threshold[3]         = {
-		.name	= "step charge soc threshold 4",
-		.reg	= STEP_CHG_SOC_OR_BATT_V_TH4_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-	},
-	.step_soc			= {
-		.name	= "step charge soc",
-		.reg	= STEP_CHG_SOC_VBATT_V_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-		.set_proc	= smblib_mapping_soc_from_field_value,
-	},
-	.step_cc_delta[0]	= {
-		.name	= "step charge current delta 1",
-		.reg	= STEP_CHG_CURRENT_DELTA1_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
-	.step_cc_delta[1]	= {
-		.name	= "step charge current delta 2",
-		.reg	= STEP_CHG_CURRENT_DELTA2_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
-	.step_cc_delta[2]	= {
-		.name	= "step charge current delta 3",
-		.reg	= STEP_CHG_CURRENT_DELTA3_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
-	.step_cc_delta[3]	= {
-		.name	= "step charge current delta 4",
-		.reg	= STEP_CHG_CURRENT_DELTA4_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
-	.step_cc_delta[4]	= {
-		.name	= "step charge current delta 5",
-		.reg	= STEP_CHG_CURRENT_DELTA5_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
 	.freq_buck		= {
 		.name	= "buck switching frequency",
 		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
@@ -236,7 +156,6 @@
 	},
 };
 
-#define STEP_CHARGING_MAX_STEPS	5
 struct smb_dt_props {
 	int	usb_icl_ua;
 	int	dc_icl_ua;
@@ -244,14 +163,13 @@
 	int	wipower_max_uw;
 	int	min_freq_khz;
 	int	max_freq_khz;
-	u32	step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
-	s32	step_cc_delta[STEP_CHARGING_MAX_STEPS];
 	struct	device_node *revid_dev_node;
 	int	float_option;
 	int	chg_inhibit_thr_mv;
 	bool	no_battery;
 	bool	hvdcp_disable;
 	bool	auto_recharge_soc;
+	int	wd_bark_time;
 };
 
 struct smb2 {
@@ -273,6 +191,11 @@
 #define MICRO_1P5A		1500000
 #define MICRO_P1A		100000
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
+#define MIN_WD_BARK_TIME		16
+#define DEFAULT_WD_BARK_TIME		64
+#define BITE_WDOG_TIMEOUT_8S		0x3
+#define BARK_WDOG_TIMEOUT_MASK		GENMASK(3, 2)
+#define BARK_WDOG_TIMEOUT_SHIFT		2
 static int smb2_parse_dt(struct smb2 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -284,27 +207,13 @@
 		return -EINVAL;
 	}
 
-	chg->step_chg_enabled = true;
+	chg->step_chg_enabled = of_property_read_bool(node,
+				"qcom,step-charging-enable");
 
-	if (of_property_count_u32_elems(node, "qcom,step-soc-thresholds")
-			!= STEP_CHARGING_MAX_STEPS - 1)
-		chg->step_chg_enabled = false;
-
-	rc = of_property_read_u32_array(node, "qcom,step-soc-thresholds",
-			chip->dt.step_soc_threshold,
-			STEP_CHARGING_MAX_STEPS - 1);
-	if (rc < 0)
-		chg->step_chg_enabled = false;
-
-	if (of_property_count_u32_elems(node, "qcom,step-current-deltas")
-			!= STEP_CHARGING_MAX_STEPS)
-		chg->step_chg_enabled = false;
-
-	rc = of_property_read_u32_array(node, "qcom,step-current-deltas",
-			chip->dt.step_cc_delta,
-			STEP_CHARGING_MAX_STEPS);
-	if (rc < 0)
-		chg->step_chg_enabled = false;
+	rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
+					&chip->dt.wd_bark_time);
+	if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
+		chip->dt.wd_bark_time = DEFAULT_WD_BARK_TIME;
 
 	chip->dt.no_battery = of_property_read_bool(node,
 						"qcom,batteryless-platform");
@@ -988,7 +897,6 @@
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TECHNOLOGY,
 	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
-	POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
 	POWER_SUPPLY_PROP_CHARGE_DONE,
 	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
 	POWER_SUPPLY_PROP_SET_SHIP_MODE,
@@ -1051,9 +959,6 @@
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 		val->intval = chg->step_chg_enabled;
 		break;
-	case POWER_SUPPLY_PROP_STEP_CHARGING_STEP:
-		rc = smblib_get_prop_step_chg_step(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		rc = smblib_get_prop_batt_voltage_now(chg, val);
 		break;
@@ -1167,6 +1072,9 @@
 			vote(chg->fcc_votable, BATT_PROFILE_VOTER, false, 0);
 		}
 		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+		chg->step_chg_enabled = !!val->intval;
+		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		chg->batt_profile_fcc_ua = val->intval;
 		vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
@@ -1207,6 +1115,7 @@
 	case POWER_SUPPLY_PROP_DP_DM:
 	case POWER_SUPPLY_PROP_RERUN_AICL:
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 		return 1;
 	default:
 		break;
@@ -1334,73 +1243,6 @@
 /***************************
  * HARDWARE INITIALIZATION *
  ***************************/
-static int smb2_config_step_charging(struct smb2 *chip)
-{
-	struct smb_charger *chg = &chip->chg;
-	int rc = 0;
-	int i;
-
-	if (!chg->step_chg_enabled)
-		return rc;
-
-	for (i = 0; i < STEP_CHARGING_MAX_STEPS - 1; i++) {
-		rc = smblib_set_charge_param(chg,
-					     &chg->param.step_soc_threshold[i],
-					     chip->dt.step_soc_threshold[i]);
-		if (rc < 0) {
-			pr_err("Couldn't configure soc thresholds rc = %d\n",
-				rc);
-			goto err_out;
-		}
-	}
-
-	for (i = 0; i < STEP_CHARGING_MAX_STEPS; i++) {
-		rc = smblib_set_charge_param(chg, &chg->param.step_cc_delta[i],
-					     chip->dt.step_cc_delta[i]);
-		if (rc < 0) {
-			pr_err("Couldn't configure cc delta rc = %d\n",
-				rc);
-			goto err_out;
-		}
-	}
-
-	rc = smblib_write(chg, STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG,
-			  STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S);
-	if (rc < 0) {
-		dev_err(chg->dev,
-			"Couldn't configure soc request timeout reg rc=%d\n",
-			 rc);
-		goto err_out;
-	}
-
-	rc = smblib_write(chg, STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG,
-			  STEP_CHG_UPDATE_FAIL_TIMEOUT_120S);
-	if (rc < 0) {
-		dev_err(chg->dev,
-			"Couldn't configure soc fail timeout reg rc=%d\n",
-			rc);
-		goto err_out;
-	}
-
-	/*
-	 *  enable step charging, source soc, standard mode, go to final
-	 *  state in case of failure.
-	 */
-	rc = smblib_write(chg, CHGR_STEP_CHG_MODE_CFG_REG,
-			       STEP_CHARGING_ENABLE_BIT |
-			       STEP_CHARGING_SOURCE_SELECT_BIT |
-			       STEP_CHARGING_SOC_FAIL_OPTION_BIT);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
-		goto err_out;
-	}
-
-	return 0;
-err_out:
-	chg->step_chg_enabled = false;
-	return rc;
-}
-
 static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
 {
 	int rc;
@@ -1576,7 +1418,7 @@
 {
 	struct smb_charger *chg = &chip->chg;
 	int rc;
-	u8 stat;
+	u8 stat, val;
 
 	if (chip->dt.no_battery)
 		chg->fake_capacity = 50;
@@ -1724,11 +1566,27 @@
 		return rc;
 	}
 
-	/* configure step charging */
-	rc = smb2_config_step_charging(chip);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure step charging rc=%d\n",
-			rc);
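+	/* BARK timeout is encoded as log2(secs / 16); BITE timeout is fixed at 8s */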
+	val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT) &
+						BARK_WDOG_TIMEOUT_MASK;
+	val |= BITE_WDOG_TIMEOUT_8S;
+	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+			BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
+			BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
+			val);
+	if (rc) {
+		pr_err("Couldn't configure WD config rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable WD BARK and enable it on plugin */
+	rc = smblib_masked_write(chg, WD_CFG_REG,
+			WATCHDOG_TRIGGER_AFP_EN_BIT |
+			WDOG_TIMER_EN_ON_PLUGIN_BIT |
+			BARK_WDOG_INT_EN_BIT,
+			WDOG_TIMER_EN_ON_PLUGIN_BIT |
+			BARK_WDOG_INT_EN_BIT);
+	if (rc) {
+		pr_err("Couldn't configure WD config rc=%d\n", rc);
 		return rc;
 	}
 
@@ -1787,6 +1645,13 @@
 		return rc;
 	}
 
+	rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	switch (chip->dt.chg_inhibit_thr_mv) {
 	case 50:
 		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
@@ -1851,6 +1716,12 @@
 	struct smb_charger *chg = &chip->chg;
 	int rc;
 
+	/*
+	 * If the USB path is suspended, we may have missed disabling the
+	 * ICL change interrupt because it may not have been requested yet.
+	 */
+	rerun_election(chg->usb_icl_votable);
+
 	/* configure power role for dual-role */
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 				 TYPEC_POWER_ROLE_CMD_MASK, 0);
@@ -1942,8 +1813,8 @@
 	smblib_handle_usb_source_change(0, &irq_data);
 	smblib_handle_chg_state_change(0, &irq_data);
 	smblib_handle_icl_change(0, &irq_data);
-	smblib_handle_step_chg_state_change(0, &irq_data);
-	smblib_handle_step_chg_soc_update_request(0, &irq_data);
+	smblib_handle_batt_temp_changed(0, &irq_data);
+	smblib_handle_wdog_bark(0, &irq_data);
 
 	return 0;
 }
@@ -1965,18 +1836,15 @@
 	},
 	[STEP_CHG_STATE_CHANGE_IRQ] = {
 		.name		= "step-chg-state-change",
-		.handler	= smblib_handle_step_chg_state_change,
-		.wake		= true,
+		.handler	= NULL,
 	},
 	[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
 		.name		= "step-chg-soc-update-fail",
-		.handler	= smblib_handle_step_chg_soc_update_fail,
-		.wake		= true,
+		.handler	= NULL,
 	},
 	[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
 		.name		= "step-chg-soc-update-request",
-		.handler	= smblib_handle_step_chg_soc_update_request,
-		.wake		= true,
+		.handler	= NULL,
 	},
 /* OTG IRQs */
 	[OTG_FAIL_IRQ] = {
@@ -1999,6 +1867,7 @@
 	[BATT_TEMP_IRQ] = {
 		.name		= "bat-temp",
 		.handler	= smblib_handle_batt_temp_changed,
+		.wake		= true,
 	},
 	[BATT_OCP_IRQ] = {
 		.name		= "bat-ocp",
@@ -2094,7 +1963,8 @@
 	},
 	[WDOG_BARK_IRQ] = {
 		.name		= "wdog-bark",
-		.handler	= NULL,
+		.handler	= smblib_handle_wdog_bark,
+		.wake		= true,
 	},
 	[AICL_FAIL_IRQ] = {
 		.name		= "aicl-fail",
@@ -2200,6 +2070,8 @@
 				return rc;
 		}
 	}
+	if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq)
+		chg->usb_icl_change_irq_enabled = true;
 
 	return rc;
 }
@@ -2332,18 +2204,18 @@
 		return rc;
 	}
 
-	rc = smblib_init(chg);
-	if (rc < 0) {
-		pr_err("Smblib_init failed rc=%d\n", rc);
-		goto cleanup;
-	}
-
 	rc = smb2_parse_dt(chip);
 	if (rc < 0) {
 		pr_err("Couldn't parse device tree rc=%d\n", rc);
 		goto cleanup;
 	}
 
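+	/* smblib_init() consumes DT-derived settings, so the DT is parsed first */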
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Smblib_init failed rc=%d\n", rc);
+		goto cleanup;
+	}
+
 	/* set driver data before resources request it */
 	platform_set_drvdata(pdev, chip);
 
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 3f26e5e..57a85de 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -22,6 +22,7 @@
 #include "smb-lib.h"
 #include "smb-reg.h"
 #include "battery.h"
+#include "step-chg-jeita.h"
 #include "storm-watch.h"
 
 #define smblib_err(chg, fmt, ...)		\
@@ -101,35 +102,6 @@
 	return rc;
 }
 
-static int smblib_get_step_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
-{
-	int rc, step_state;
-	u8 stat;
-
-	if (!chg->step_chg_enabled) {
-		*cc_delta_ua = 0;
-		return 0;
-	}
-
-	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	step_state = (stat & STEP_CHARGING_STATUS_MASK) >>
-				STEP_CHARGING_STATUS_SHIFT;
-	rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
-				     cc_delta_ua);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
 static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
 {
 	int rc, cc_minus_ua;
@@ -148,7 +120,7 @@
 	}
 
 	rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp,
-				     &cc_minus_ua);
+					&cc_minus_ua);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n", rc);
 		return rc;
@@ -401,31 +373,17 @@
 	return rc;
 }
 
-static int step_charge_soc_update(struct smb_charger *chg, int capacity)
-{
-	int rc = 0;
-
-	rc = smblib_set_charge_param(chg, &chg->param.step_soc, capacity);
-	if (rc < 0) {
-		smblib_err(chg, "Error in updating soc, rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = smblib_write(chg, STEP_CHG_SOC_VBATT_V_UPDATE_REG,
-			STEP_CHG_SOC_VBATT_V_UPDATE_BIT);
-	if (rc < 0) {
-		smblib_err(chg,
-			"Couldn't set STEP_CHG_SOC_VBATT_V_UPDATE_REG rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	return rc;
-}
-
 int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
 {
 	int rc = 0;
+	int irq = chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq;
+
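+	/* keep the ICL-change interrupt masked while the USB input is suspended */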
+	if (suspend && irq) {
+		if (chg->usb_icl_change_irq_enabled) {
+			disable_irq_nosync(irq);
+			chg->usb_icl_change_irq_enabled = false;
+		}
+	}
 
 	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
 				 suspend ? USBIN_SUSPEND_BIT : 0);
@@ -433,6 +391,13 @@
 		smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
 			suspend ? "suspend" : "resume", rc);
 
+	if (!suspend && irq) {
+		if (!chg->usb_icl_change_irq_enabled) {
+			enable_irq(irq);
+			chg->usb_icl_change_irq_enabled = true;
+		}
+	}
+
 	return rc;
 }
 
@@ -522,6 +487,45 @@
 /********************
  * HELPER FUNCTIONS *
  ********************/
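+/* helper to lazily get the dpdm regulator and enable or disable it */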
+static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
+{
+	int rc = 0;
+
+	/* fetch the DPDM regulator */
+	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+				"dpdm-supply", NULL)) {
+		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+		if (IS_ERR(chg->dpdm_reg)) {
+			rc = PTR_ERR(chg->dpdm_reg);
+			smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n",
+					rc);
+			chg->dpdm_reg = NULL;
+			return rc;
+		}
+	}
+
+	if (enable) {
+		if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+			rc = regulator_enable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't enable dpdm regulator rc=%d\n",
+					rc);
+		}
+	} else {
+		if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+			rc = regulator_disable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't disable dpdm regulator rc=%d\n",
+					rc);
+		}
+	}
+
+	return rc;
+}
 
 static void smblib_rerun_apsd(struct smb_charger *chg)
 {
@@ -548,10 +552,17 @@
 	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
 
 	/* if PD is active, APSD is disabled so won't have a valid result */
-	if (chg->pd_active)
+	if (chg->pd_active) {
 		chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
-	else
+	} else {
+		/*
+		 * Update the real charger type only if it is not a FLOAT
+		 * charger that was detected as an SDP
+		 */
+		if (!(apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+			chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
 		chg->real_charger_type = apsd_result->pst;
+	}
 
 	smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
 					apsd_result->name, chg->pd_active);
@@ -634,13 +645,9 @@
 
 	cancel_delayed_work_sync(&chg->pl_enable_work);
 
-	if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
-		smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
-		rc = regulator_disable(chg->dpdm_reg);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
-				rc);
-	}
+	rc = smblib_request_dpdm(chg, false);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
 
 	if (chg->wa_flags & BOOST_BACK_WA) {
 		data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -732,24 +739,9 @@
 	if (!val.intval)
 		return 0;
 
-	/* fetch the DPDM regulator */
-	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
-						"dpdm-supply", NULL)) {
-		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
-		if (IS_ERR(chg->dpdm_reg)) {
-			smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
-				PTR_ERR(chg->dpdm_reg));
-			chg->dpdm_reg = NULL;
-		}
-	}
-
-	if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
-		smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
-		rc = regulator_enable(chg->dpdm_reg);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
-				rc);
-	}
+	rc = smblib_request_dpdm(chg, true);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc);
 
 	chg->uusb_apsd_rerun_done = true;
 	smblib_rerun_apsd(chg);
@@ -819,6 +811,7 @@
 {
 	int rc;
 	u8 icl_options;
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
 
 	/* power source is SDP */
 	switch (icl_ua) {
@@ -843,6 +836,21 @@
 		return -EINVAL;
 	}
 
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+		apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		/*
+		 * change the float charger configuration to SDP, if this
+		 * is the case of SDP being detected as FLOAT
+		 */
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+			FORCE_FLOAT_SDP_CFG_BIT, FORCE_FLOAT_SDP_CFG_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set float ICL options rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+
 	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
 		CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
 	if (rc < 0) {
@@ -884,7 +892,6 @@
 	if (icl_ua < USBIN_25MA)
 		return smblib_set_usb_suspend(chg, true);
 
-	disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
 	if (icl_ua == INT_MAX)
 		goto override_suspend_config;
 
@@ -942,7 +949,6 @@
 	}
 
 enable_icl_changed_interrupt:
-	enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
 	return rc;
 }
 
@@ -1318,11 +1324,84 @@
 #define MAX_RETRY		15
 #define MIN_DELAY_US		2000
 #define MAX_DELAY_US		9000
+static int otg_current[] = {250000, 500000, 1000000, 1500000};
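+/*
+ * OTG enable workaround: retry the boost soft-start with progressively
+ * larger OTG current limits until it completes, then restore otg_cl_ua.
+ */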
+static int smblib_enable_otg_wa(struct smb_charger *chg)
+{
+	u8 stat;
+	int rc, i, retry_count = 0, min_delay = MIN_DELAY_US;
+
+	for (i = 0; i < ARRAY_SIZE(otg_current); i++) {
+		smblib_dbg(chg, PR_OTG, "enabling OTG with %duA\n",
+						otg_current[i]);
+		rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+						otg_current[i]);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set otg limit rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+			return rc;
+		}
+
+		retry_count = 0;
+		min_delay = MIN_DELAY_US;
+		do {
+			usleep_range(min_delay, min_delay + 100);
+			rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't read OTG status rc=%d\n",
+							rc);
+				goto out;
+			}
+
+			if (stat & BOOST_SOFTSTART_DONE_BIT) {
+				rc = smblib_set_charge_param(chg,
+					&chg->param.otg_cl, chg->otg_cl_ua);
+				if (rc < 0) {
+					smblib_err(chg, "Couldn't set otg limit rc=%d\n",
+							rc);
+					goto out;
+				}
+				break;
+			}
+			/* increase the delay for following iterations */
+			if (retry_count > 5)
+				min_delay = MAX_DELAY_US;
+
+		} while (retry_count++ < MAX_RETRY);
+
+		if (retry_count >= MAX_RETRY) {
+			smblib_dbg(chg, PR_OTG, "OTG enable failed with %duA\n",
+								otg_current[i]);
+			rc = smblib_write(chg, CMD_OTG_REG, 0);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't disable OTG rc=%d\n", rc);
+				goto out;
+			}
+		} else {
+			smblib_dbg(chg, PR_OTG, "OTG enabled\n");
+			return 0;
+		}
+	}
+
+	if (i == ARRAY_SIZE(otg_current)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	return 0;
+out:
+	smblib_write(chg, CMD_OTG_REG, 0);
+	return rc;
+}
+
 static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
 {
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
-	int rc, retry_count = 0, min_delay = MIN_DELAY_US;
-	u8 stat;
+	int rc;
 
 	smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
 	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
@@ -1335,48 +1414,17 @@
 	}
 
 	smblib_dbg(chg, PR_OTG, "enabling OTG\n");
-	rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't enable OTG regulator rc=%d\n", rc);
-		return rc;
-	}
 
 	if (chg->wa_flags & OTG_WA) {
-		/* check for softstart */
-		do {
-			usleep_range(min_delay, min_delay + 100);
-			rc = smblib_read(chg, OTG_STATUS_REG, &stat);
-			if (rc < 0) {
-				smblib_err(chg,
-					"Couldn't read OTG status rc=%d\n",
-					rc);
-				goto out;
-			}
-
-			if (stat & BOOST_SOFTSTART_DONE_BIT) {
-				rc = smblib_set_charge_param(chg,
-					&chg->param.otg_cl, chg->otg_cl_ua);
-				if (rc < 0)
-					smblib_err(chg,
-						"Couldn't set otg limit\n");
-				break;
-			}
-
-			/* increase the delay for following iterations */
-			if (retry_count > 5)
-				min_delay = MAX_DELAY_US;
-		} while (retry_count++ < MAX_RETRY);
-
-		if (retry_count >= MAX_RETRY) {
-			smblib_dbg(chg, PR_OTG, "Boost Softstart not done\n");
-			goto out;
-		}
+		rc = smblib_enable_otg_wa(chg);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+	} else {
+		rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
 	}
 
-	return 0;
-out:
-	/* disable OTG if softstart failed */
-	smblib_write(chg, CMD_OTG_REG, 0);
 	return rc;
 }
 
@@ -1755,30 +1803,6 @@
 	return rc;
 }
 
-int smblib_get_prop_step_chg_step(struct smb_charger *chg,
-				union power_supply_propval *val)
-{
-	int rc;
-	u8 stat;
-
-	if (!chg->step_chg_enabled) {
-		val->intval = -1;
-		return 0;
-	}
-
-	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	val->intval = (stat & STEP_CHARGING_STATUS_MASK) >>
-				STEP_CHARGING_STATUS_SHIFT;
-
-	return rc;
-}
-
 int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
 					union power_supply_propval *val)
 {
@@ -2427,6 +2451,31 @@
 	return 0;
 }
 
+#define SDP_CURRENT_UA			500000
+#define CDP_CURRENT_UA			1500000
+#define DCP_CURRENT_UA			1500000
+#define HVDCP_CURRENT_UA		3000000
+#define TYPEC_DEFAULT_CURRENT_UA	900000
+#define TYPEC_MEDIUM_CURRENT_UA		1500000
+#define TYPEC_HIGH_CURRENT_UA		3000000
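+/* map the Type-C Rp advertisement to an input current limit for DCP/FLOAT */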
+static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
+{
+	int rp_ua;
+
+	switch (typec_mode) {
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		rp_ua = TYPEC_HIGH_CURRENT_UA;
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+	/* fall through */
+	default:
+		rp_ua = DCP_CURRENT_UA;
+	}
+
+	return rp_ua;
+}
+
 /*******************
  * USB PSY SETTERS *
  * *****************/
@@ -2444,14 +2493,54 @@
 	return rc;
 }
 
+static int smblib_handle_usb_current(struct smb_charger *chg,
+					int usb_current)
+{
+	int rc = 0, rp_ua, typec_mode;
+
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		if (usb_current == -ETIMEDOUT) {
+			/*
+			 * Valid FLOAT charger, report the current based
+			 * on Rp
+			 */
+			typec_mode = smblib_get_prop_typec_mode(chg);
+			rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+			rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+								true, rp_ua);
+			if (rc < 0)
+				return rc;
+		} else {
+			/*
+			 * FLOAT charger detected as SDP by USB driver,
+			 * charge with the requested current and update the
+			 * real_charger_type
+			 */
+			chg->real_charger_type = POWER_SUPPLY_TYPE_USB;
+			rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+						true, usb_current);
+			if (rc < 0)
+				return rc;
+			rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+							false, 0);
+			if (rc < 0)
+				return rc;
+		}
+	} else {
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+					true, usb_current);
+	}
+
+	return rc;
+}
+
 int smblib_set_prop_usb_current_max(struct smb_charger *chg,
 				    const union power_supply_propval *val)
 {
 	int rc = 0;
 
 	if (!chg->pd_active) {
-		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
-				true, val->intval);
+		rc = smblib_handle_usb_current(chg, val->intval);
 	} else if (chg->system_suspend_supported) {
 		if (val->intval <= USBIN_25MA)
 			rc = vote(chg->usb_icl_votable,
@@ -2834,46 +2923,72 @@
 	return rc;
 }
 
+static int smblib_recover_from_soft_jeita(struct smb_charger *chg)
+{
+	u8 stat_1, stat_2;
+	int rc;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat_1);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat_2);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	if ((chg->jeita_status && !(stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK) &&
+		((stat_1 & BATTERY_CHARGER_STATUS_MASK) == TERMINATE_CHARGE))) {
+		/*
+		 * We are moving from JEITA soft -> Normal and charging
+		 * is terminated
+		 */
+		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG, 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable charging rc=%d\n",
+						rc);
+			return rc;
+		}
+		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG,
+						CHARGING_ENABLE_CMD_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable charging rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+
+	chg->jeita_status = stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK;
+
+	return 0;
+}
+
 /************************
  * USB MAIN PSY GETTERS *
  ************************/
 int smblib_get_prop_fcc_delta(struct smb_charger *chg,
-			       union power_supply_propval *val)
+				union power_supply_propval *val)
 {
-	int rc, jeita_cc_delta_ua, step_cc_delta_ua, hw_cc_delta_ua = 0;
-
-	rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
-		step_cc_delta_ua = 0;
-	} else {
-		hw_cc_delta_ua = step_cc_delta_ua;
-	}
+	int rc, jeita_cc_delta_ua = 0;
 
 	rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
 		jeita_cc_delta_ua = 0;
-	} else if (jeita_cc_delta_ua < 0) {
-		/* HW will take the min between JEITA and step charge */
-		hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
 	}
 
-	val->intval = hw_cc_delta_ua;
+	val->intval = jeita_cc_delta_ua;
 	return 0;
 }
 
 /************************
  * USB MAIN PSY SETTERS *
  ************************/
-
-#define SDP_CURRENT_UA			500000
-#define CDP_CURRENT_UA			1500000
-#define DCP_CURRENT_UA			1500000
-#define HVDCP_CURRENT_UA		3000000
-#define TYPEC_DEFAULT_CURRENT_UA	900000
-#define TYPEC_MEDIUM_CURRENT_UA		1500000
-#define TYPEC_HIGH_CURRENT_UA		3000000
 int smblib_get_charge_current(struct smb_charger *chg,
 				int *total_current_ua)
 {
@@ -3040,61 +3155,18 @@
 	return IRQ_HANDLED;
 }
 
-irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data)
-{
-	struct smb_irq_data *irq_data = data;
-	struct smb_charger *chg = irq_data->parent_data;
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
-	if (chg->step_chg_enabled)
-		rerun_election(chg->fcc_votable);
-
-	return IRQ_HANDLED;
-}
-
-irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data)
-{
-	struct smb_irq_data *irq_data = data;
-	struct smb_charger *chg = irq_data->parent_data;
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
-	if (chg->step_chg_enabled)
-		rerun_election(chg->fcc_votable);
-
-	return IRQ_HANDLED;
-}
-
-#define STEP_SOC_REQ_MS	3000
-irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data)
-{
-	struct smb_irq_data *irq_data = data;
-	struct smb_charger *chg = irq_data->parent_data;
-	int rc;
-	union power_supply_propval pval = {0, };
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
-	if (!chg->bms_psy) {
-		schedule_delayed_work(&chg->step_soc_req_work,
-				      msecs_to_jiffies(STEP_SOC_REQ_MS));
-		return IRQ_HANDLED;
-	}
-
-	rc = smblib_get_prop_batt_capacity(chg, &pval);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
-	else
-		step_charge_soc_update(chg, pval.intval);
-
-	return IRQ_HANDLED;
-}
-
 irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+
+	rc = smblib_recover_from_soft_jeita(chg);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't recover chg from soft jeita rc=%d\n",
+				rc);
+		return IRQ_HANDLED;
+	}
 
 	rerun_election(chg->fcc_votable);
 	power_supply_changed(chg->batt_psy);
@@ -3207,25 +3279,10 @@
 	smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
 						chg->chg_freq.freq_removal);
 
-	/* fetch the DPDM regulator */
-	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
-						"dpdm-supply", NULL)) {
-		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
-		if (IS_ERR(chg->dpdm_reg)) {
-			smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
-				PTR_ERR(chg->dpdm_reg));
-			chg->dpdm_reg = NULL;
-		}
-	}
-
 	if (vbus_rising) {
-		if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
-			smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
-			rc = regulator_enable(chg->dpdm_reg);
-			if (rc < 0)
-				smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
-					rc);
-		}
+		rc = smblib_request_dpdm(chg, true);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc);
 
 		/* Schedule work to enable parallel charger */
 		vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
@@ -3245,13 +3302,9 @@
 			}
 		}
 
-		if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
-			smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
-			rc = regulator_disable(chg->dpdm_reg);
-			if (rc < 0)
-				smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
-					rc);
-		}
+		rc = smblib_request_dpdm(chg, false);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
 	}
 
 	if (chg->micro_usb_mode)
@@ -3472,24 +3525,6 @@
 		   rising ? "rising" : "falling");
 }
 
-static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
-{
-	int rp_ua;
-
-	switch (typec_mode) {
-	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
-		rp_ua = TYPEC_HIGH_CURRENT_UA;
-		break;
-	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
-	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
-	/* fall through */
-	default:
-		rp_ua = DCP_CURRENT_UA;
-	}
-
-	return rp_ua;
-}
-
 static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
 {
 	int typec_mode;
@@ -3515,11 +3550,17 @@
 		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
 		break;
 	case POWER_SUPPLY_TYPE_USB_DCP:
-	case POWER_SUPPLY_TYPE_USB_FLOAT:
 		typec_mode = smblib_get_prop_typec_mode(chg);
 		rp_ua = get_rp_based_dcp_current(chg, typec_mode);
 		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua);
 		break;
+	case POWER_SUPPLY_TYPE_USB_FLOAT:
+		/*
+		 * limit ICL to 100mA; the USB driver will enumerate to check
+		 * if this is an SDP and set the current appropriately
+		 */
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
+		break;
 	case POWER_SUPPLY_TYPE_USB_HVDCP:
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
@@ -3655,13 +3696,9 @@
 
 	chg->cc2_detach_wa_active = false;
 
-	if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
-		smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
-		rc = regulator_disable(chg->dpdm_reg);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
-				rc);
-	}
+	rc = smblib_request_dpdm(chg, false);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
 
 	if (chg->wa_flags & BOOST_BACK_WA) {
 		data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -3720,6 +3757,13 @@
 	chg->pd_hard_reset = 0;
 	chg->typec_legacy_valid = false;
 
+	/* write back the default FLOAT charger configuration */
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				(u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write float charger options rc=%d\n",
+			rc);
+
 	/* reset back to 120mS tCC debounce */
 	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
 	if (rc < 0)
@@ -3799,10 +3843,14 @@
 		smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
 									rc);
 
-	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
+	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) {
 		typec_sink_insertion(chg);
-	else
+	} else {
+		rc = smblib_request_dpdm(chg, true);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc);
 		typec_sink_removal(chg);
+	}
 }
 
 static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
@@ -3815,6 +3863,24 @@
 		return;
 
 	/*
+	 * If APSD indicates FLOAT and the USB stack has detected SDP,
+	 * do not respond to Rp changes as we cannot confirm that it is
+	 * a legacy cable
+	 */
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+		return;
+	/*
+	 * We want the ICL vote @ 100mA for a FLOAT charger
+	 * until the detection by the USB stack is complete.
+	 * Ignore the Rp changes unless there is a
+	 * pre-existing valid vote.
+	 */
+	if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+		get_client_vote(chg->usb_icl_votable,
+			LEGACY_UNKNOWN_VOTER) <= 100000)
+		return;
+
+	/*
 	 * handle Rp change for DCP/FLOAT/OCP.
 	 * Update the current only if the Rp is different from
 	 * the last Rp value.
@@ -4011,10 +4077,15 @@
 	struct smb_charger *chg = irq_data->parent_data;
 	int rc;
 
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
 	rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
 	if (rc < 0)
 		smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
 
+	if (chg->step_chg_enabled)
+		power_supply_changed(chg->batt_psy);
+
 	return IRQ_HANDLED;
 }
 
@@ -4097,22 +4168,6 @@
 		power_supply_changed(chg->batt_psy);
 }
 
-static void step_soc_req_work(struct work_struct *work)
-{
-	struct smb_charger *chg = container_of(work, struct smb_charger,
-						step_soc_req_work.work);
-	union power_supply_propval pval = {0, };
-	int rc;
-
-	rc = smblib_get_prop_batt_capacity(chg, &pval);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
-		return;
-	}
-
-	step_charge_soc_update(chg, pval.intval);
-}
-
 static void clear_hdc_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -4645,7 +4700,6 @@
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
 	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
 	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
-	INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
 	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
 	INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
 	INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
@@ -4667,6 +4721,13 @@
 			return rc;
 		}
 
+		rc = qcom_step_chg_init(chg->step_chg_enabled);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't init step charging rc=%d\n",
+				rc);
+			return rc;
+		}
+
 		rc = smblib_create_votables(chg);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't create votables rc=%d\n",
@@ -4701,7 +4762,6 @@
 		cancel_work_sync(&chg->bms_update_work);
 		cancel_work_sync(&chg->rdstd_cc2_detach_work);
 		cancel_delayed_work_sync(&chg->hvdcp_detect_work);
-		cancel_delayed_work_sync(&chg->step_soc_req_work);
 		cancel_delayed_work_sync(&chg->clear_hdc_work);
 		cancel_work_sync(&chg->otg_oc_work);
 		cancel_work_sync(&chg->vconn_oc_work);
@@ -4713,6 +4773,7 @@
 		cancel_delayed_work_sync(&chg->bb_removal_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblib_destroy_votables(chg);
+		qcom_step_chg_deinit();
 		qcom_batt_deinit();
 		break;
 	case PARALLEL_SLAVE:
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index c08d404..4ffbeb6 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -192,9 +192,6 @@
 	struct smb_chg_param	dc_icl_div2_mid_hv;
 	struct smb_chg_param	dc_icl_div2_hv;
 	struct smb_chg_param	jeita_cc_comp;
-	struct smb_chg_param	step_soc_threshold[4];
-	struct smb_chg_param	step_soc;
-	struct smb_chg_param	step_cc_delta[5];
 	struct smb_chg_param	freq_buck;
 	struct smb_chg_param	freq_boost;
 };
@@ -289,7 +286,6 @@
 	struct work_struct	rdstd_cc2_detach_work;
 	struct delayed_work	hvdcp_detect_work;
 	struct delayed_work	ps_change_timeout_work;
-	struct delayed_work	step_soc_req_work;
 	struct delayed_work	clear_hdc_work;
 	struct work_struct	otg_oc_work;
 	struct work_struct	vconn_oc_work;
@@ -330,6 +326,9 @@
 	int			fake_input_current_limited;
 	bool			pr_swap_in_progress;
 	int			typec_mode;
+	int			usb_icl_change_irq_enabled;
+	u32			jeita_status;
+	u8			float_cfg;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -384,9 +383,6 @@
 irqreturn_t smblib_handle_debug(int irq, void *data);
 irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data);
 irqreturn_t smblib_handle_chg_state_change(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data);
 irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data);
 irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data);
 irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data);
@@ -426,9 +422,6 @@
 				union power_supply_propval *val);
 int smblib_get_prop_batt_temp(struct smb_charger *chg,
 				union power_supply_propval *val);
-int smblib_get_prop_step_chg_step(struct smb_charger *chg,
-				union power_supply_propval *val);
-
 int smblib_set_prop_input_suspend(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_set_prop_batt_capacity(struct smb_charger *chg,
@@ -508,7 +501,7 @@
 void smblib_suspend_on_debug_battery(struct smb_charger *chg);
 int smblib_rerun_apsd_if_required(struct smb_charger *chg);
 int smblib_get_prop_fcc_delta(struct smb_charger *chg,
-			       union power_supply_propval *val);
+				union power_supply_propval *val);
 int smblib_icl_override(struct smb_charger *chg, bool override);
 int smblib_dp_dm(struct smb_charger *chg, int val);
 int smblib_rerun_aicl(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index ca0a2c6..dd949e7 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -845,6 +845,13 @@
 		}
 	}
 
+	/* configure a fixed 700 kHz buck switching frequency to avoid tdie errors */
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+	if (rc < 0) {
+		pr_err("Couldn't configure 700 kHz switch freq rc=%d\n", rc);
+		return rc;
+	}
+
 	/* enable watchdog bark and bite interrupts, and disable the watchdog */
 	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
 			| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
new file mode 100644
index 0000000..a2c08be
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -0,0 +1,272 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCOM-STEPCHG: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/pmic-voter.h>
+#include "step-chg-jeita.h"
+
+#define MAX_STEP_CHG_ENTRIES	8
+#define STEP_CHG_VOTER		"STEP_CHG_VOTER"
+#define STATUS_CHANGE_VOTER	"STATUS_CHANGE_VOTER"
+
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
+
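+/* one step entry: FCC to vote for while VBATT/SOC lies in [low, high] */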
+struct step_chg_data {
+	u32 vbatt_soc_low;
+	u32 vbatt_soc_high;
+	u32 fcc_ua;
+};
+
+struct step_chg_cfg {
+	u32 psy_prop;
+	char *prop_name;
+	struct step_chg_data cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct step_chg_info {
+	ktime_t			last_update_time;
+	bool			step_chg_enable;
+
+	struct votable		*fcc_votable;
+	struct wakeup_source	*step_chg_ws;
+	struct power_supply	*batt_psy;
+	struct delayed_work	status_change_work;
+	struct notifier_block	nb;
+};
+
+static struct step_chg_info *the_chip;
+
+/*
+ * Step Charging Configuration
+ * Update the table based on the battery profile
+ * Supports VBATT and SOC based source
+ */
+static struct step_chg_cfg step_chg_config = {
+	.psy_prop  = POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	.prop_name = "VBATT",
+	.cfg	 = {
+		/* VBAT_LOW	VBAT_HIGH	FCC */
+		{3600000,	4000000,	3000000},
+		{4000000,	4200000,	2800000},
+		{4200000,	4400000,	2000000},
+	},
+/*
+ *	SOC STEP-CHG configuration example.
+ *
+ *	.psy_prop = POWER_SUPPLY_PROP_CAPACITY,
+ *	.prop_name = "SOC",
+ *	.cfg	= {
+ *		//SOC_LOW	SOC_HIGH	FCC
+ *		{20,		70,		3000000},
+ *		{70,		90,		2750000},
+ *		{90,		100,		2500000},
+ *	},
+ */
+};
+
+static bool is_batt_available(struct step_chg_info *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
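+/* look up the FCC for the given reading; -ENODATA if no entry matches */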
+static int get_fcc(int threshold)
+{
+	int i;
+
+	for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+		if (is_between(step_chg_config.cfg[i].vbatt_soc_low,
+			step_chg_config.cfg[i].vbatt_soc_high, threshold))
+			return step_chg_config.cfg[i].fcc_ua;
+
+	return -ENODATA;
+}
+
+static int handle_step_chg_config(struct step_chg_info *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0, fcc_ua = 0;
+
+	rc = power_supply_get_property(chip->batt_psy,
+		POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval);
+	if (rc < 0)
+		chip->step_chg_enable = 0;
+	else
+		chip->step_chg_enable = pval.intval;
+
+	if (!chip->step_chg_enable) {
+		if (chip->fcc_votable)
+			vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+		return 0;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy,
+				step_chg_config.psy_prop, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't read %s property rc=%d\n",
+				step_chg_config.prop_name, rc);
+		return rc;
+	}
+
+	chip->fcc_votable = find_votable("FCC");
+	if (!chip->fcc_votable)
+		return -EINVAL;
+
+	fcc_ua = get_fcc(pval.intval);
+	if (fcc_ua < 0) {
+		/* remove the vote if no step-based fcc is found */
+		vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+		return 0;
+	}
+
+	vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
+
+	pr_debug("%s = %d Step-FCC = %duA\n",
+		step_chg_config.prop_name, pval.intval, fcc_ua);
+
+	return 0;
+}
+
+#define STEP_CHG_HYSTERISIS_DELAY_US		5000000 /* 5 secs */
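+/* re-evaluate the step config on battery updates, rate limited by the delay above */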
+static void status_change_work(struct work_struct *work)
+{
+	struct step_chg_info *chip = container_of(work,
+			struct step_chg_info, status_change_work.work);
+	int rc = 0;
+	u64 elapsed_us;
+
+	elapsed_us = ktime_us_delta(ktime_get(), chip->last_update_time);
+	if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+		goto release_ws;
+
+	if (!is_batt_available(chip))
+		goto release_ws;
+
+	rc = handle_step_chg_config(chip);
+	if (rc < 0)
+		goto release_ws;
+
+	chip->last_update_time = ktime_get();
+
+release_ws:
+	__pm_relax(chip->step_chg_ws);
+}
+
+static int step_chg_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct step_chg_info *chip = container_of(nb, struct step_chg_info, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "battery") == 0)) {
+		__pm_stay_awake(chip->step_chg_ws);
+		schedule_delayed_work(&chip->status_change_work, 0);
+	}
+
+	return NOTIFY_OK;
+}
+
+static int step_chg_register_notifier(struct step_chg_info *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = step_chg_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int qcom_step_chg_init(bool step_chg_enable)
+{
+	int rc;
+	struct step_chg_info *chip;
+
+	if (the_chip) {
+		pr_err("Already initialized\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->step_chg_ws = wakeup_source_register("qcom-step-chg");
+	if (!chip->step_chg_ws) {
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	chip->step_chg_enable = step_chg_enable;
+
+	if (step_chg_enable && (!step_chg_config.psy_prop ||
+				!step_chg_config.prop_name)) {
+		/* fail if step-chg configuration is invalid */
+		pr_err("Step-chg configuration not defined - fail\n");
+		rc = -ENODATA;
+		goto release_wakeup_source;
+	}
+
+	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+
+	rc = step_chg_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto release_wakeup_source;
+	}
+
+	the_chip = chip;
+
+	if (step_chg_enable)
+		pr_info("Step charging enabled. Using %s source\n",
+				step_chg_config.prop_name);
+
+	return 0;
+
+release_wakeup_source:
+	wakeup_source_unregister(chip->step_chg_ws);
+cleanup:
+	kfree(chip);
+	return rc;
+}
+
+void qcom_step_chg_deinit(void)
+{
+	struct step_chg_info *chip = the_chip;
+
+	if (!chip)
+		return;
+
+	cancel_delayed_work_sync(&chip->status_change_work);
+	power_supply_unreg_notifier(&chip->nb);
+	wakeup_source_unregister(chip->step_chg_ws);
+	the_chip = NULL;
+	kfree(chip);
+}
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
new file mode 100644
index 0000000..236877a
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STEP_CHG_H__
+#define __STEP_CHG_H__
+int qcom_step_chg_init(bool step_chg_enable);
+void qcom_step_chg_deinit(void);
+#endif /* __STEP_CHG_H__ */
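
The header above exposes the two entry points used by charger drivers. A minimal usage sketch follows; the example_* function names are hypothetical and not part of this change:

#include "step-chg-jeita.h"

static int example_charger_probe(void)
{
	/*
	 * Enable step charging: the helper registers a power_supply
	 * notifier and re-votes on the "FCC" votable whenever the
	 * monitored battery property crosses a configured range.
	 */
	return qcom_step_chg_init(true);
}

static void example_charger_remove(void)
{
	qcom_step_chg_deinit();
}
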
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index b1e6a3b..9510016 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -1316,6 +1316,27 @@
 static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl);
 
 /**
+ * cpr3_regulator_cprh_initialized() - checks if CPRh has already been
+ *		initialized by the boot loader
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: true if CPRh controller is already initialized else false
+ */
+static bool cpr3_regulator_cprh_initialized(struct cpr3_controller *ctrl)
+{
+	u32 reg;
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH)
+		return false;
+
+	ctrl->cpr_hw_version = readl_relaxed(ctrl->cpr_ctrl_base
+						+ CPR3_REG_CPR_VERSION);
+	reg = readl_relaxed(ctrl->cpr_ctrl_base + CPRH_REG_CTL(ctrl));
+
+	return reg & CPRH_CTL_OSM_ENABLED;
+}
+
+/**
  * cpr3_regulator_init_cprh() - performs hardware initialization at the
  *		controller and thread level required for CPRh operation.
  * @ctrl:		Pointer to the CPR3 controller
@@ -6459,6 +6480,11 @@
 	}
 	ctrl->cpr_ctrl_base = devm_ioremap(dev, res->start, resource_size(res));
 
+	if (cpr3_regulator_cprh_initialized(ctrl)) {
+		cpr3_err(ctrl, "CPRh controller already initialized by boot loader\n");
+		return -EPERM;
+	}
+
 	if (ctrl->aging_possible_mask) {
 		/*
 		 * Aging possible register address is required if an aging
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index deb0ce5..c393940 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -38,12 +38,10 @@
 #define SDM660_KBSS_FUSE_CORNERS			5
 
 #define SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS		4
-#define SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS	3
-/*
- * This must be set to the larger of SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS and
- * SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS values.
- */
-#define SDM845_KBSS_MAX_FUSE_CORNERS			4
+#define SDM845_V1_KBSS_PERF_CLUSTER_FUSE_CORNERS	3
+#define SDM845_V2_KBSS_PERF_CLUSTER_FUSE_CORNERS	5
+/* This must be set to the largest of the SDM845 FUSE_CORNERS values. */
+#define SDM845_KBSS_MAX_FUSE_CORNERS			5
 
 /**
  * struct cprh_kbss_fuses - KBSS specific fuse data
@@ -153,18 +151,38 @@
 #define CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID	0
 
 static const char * const
-cprh_sdm845_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+cprh_sdm845_v1_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		"LowSVS",
 		"SVS_L1",
 		"NOM_L1",
 		"TURBO",
+		"",
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
 		"SVS",
 		"NOM",
 		"TURBO_L2",
 		"",
+		"",
+	},
+};
+
+static const char * const
+cprh_sdm845_v2_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		"LowSVS",
+		"SVS_L1",
+		"NOM",
+		"TURBO",
+		"",
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		"LowSVS",
+		"SVS",
+		"NOM_L1",
+		"TURBO_L2",
+		"BINNING",
 	},
 };
 
@@ -334,7 +352,7 @@
  *		 different fuse rows.
  */
 static const struct cpr3_fuse_param
-sdm845_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v1_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{66, 52, 55}, {} },
@@ -359,7 +377,34 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v2_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			{{66, 52, 55}, {} },
+			{{66, 48, 51}, {} },
+			{{66, 44, 47}, {} },
+			{{66, 40, 43}, {} },
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			{{66, 52, 55}, {} },
+			{{66, 48, 51}, {} },
+			{{66, 44, 47}, {} },
+			{{66, 40, 43}, {} },
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			{{73,  5,  8}, {} },
+			{{70, 12, 15}, {} },
+			{{70,  8, 11}, {} },
+			{{70,  4,  7}, {} },
+			{{70,  0,  3}, {} },
+		},
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{67, 10, 15}, {} },
@@ -384,7 +429,34 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v2_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			{{67, 10, 15}, {} },
+			{{67,  4,  9}, {} },
+			{{66, 62, 63}, {67,  0,  3}, {} },
+			{{66, 56, 61}, {} },
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			{{68, 50, 55}, {} },
+			{{68, 44, 49}, {} },
+			{{68, 38, 43}, {} },
+			{{68, 32, 37}, {} },
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			{{72, 10, 15}, {} },
+			{{70, 34, 39}, {} },
+			{{70, 28, 33}, {} },
+			{{70, 22, 27}, {} },
+			{{70, 16, 21}, {} },
+		},
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{67, 52, 63}, {} },
@@ -409,7 +481,34 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
+sdm845_v2_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			{{67, 52, 63}, {} },
+			{{67, 40, 51}, {} },
+			{{67, 28, 39}, {} },
+			{{67, 16, 27}, {} },
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			{{69, 28, 39}, {} },
+			{{69, 16, 27}, {} },
+			{{69,  4, 15}, {} },
+			{{68, 56, 63}, {69, 0, 3}, {} },
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			{{72, 16, 27}, {} },
+			{{71, 12, 23}, {} },
+			{{71,  0, 11}, {} },
+			{{70, 52, 63}, {} },
+			{{70, 40, 51}, {} },
+		},
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{} },
@@ -433,6 +532,33 @@
 	},
 };
 
+static const struct cpr3_fuse_param
+sdm845_v2_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			{{} },
+			{{68, 16, 23}, {} },
+			{{68,  8, 15}, {} },
+			{{68,  0,  7}, {} },
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			{{} },
+			{{69, 56, 63}, {} },
+			{{69, 48, 55}, {} },
+			{{69, 40, 47}, {} },
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			{{} },
+			{{72, 28, 35}, {} },
+			{{71, 40, 47}, {} },
+			{{71, 32, 39}, {} },
+			{{71, 24, 31}, {} },
+		},
+	},
+};
+
 static const struct cpr3_fuse_param msm8998_cpr_fusing_rev_param[] = {
 	{39, 51, 53},
 	{},
@@ -443,11 +569,16 @@
 	{},
 };
 
-static const struct cpr3_fuse_param sdm845_cpr_fusing_rev_param[] = {
+static const struct cpr3_fuse_param sdm845_v1_cpr_fusing_rev_param[] = {
 	{73, 3, 5},
 	{},
 };
 
+static const struct cpr3_fuse_param sdm845_v2_cpr_fusing_rev_param[] = {
+	{75, 34, 36},
+	{},
+};
+
 static const struct cpr3_fuse_param kbss_speed_bin_param[] = {
 	{38, 29, 31},
 	{},
@@ -490,7 +621,7 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_aging_init_quot_diff_param[2][2] = {
+sdm845_v1_kbss_aging_init_quot_diff_param[2][2] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		{68, 21, 28},
 		{},
@@ -501,6 +632,18 @@
 	},
 };
 
+static const struct cpr3_fuse_param
+sdm845_v2_kbss_aging_init_quot_diff_param[2][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{68, 24, 31},
+		{},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{71, 48, 55},
+		{},
+	},
+};
+
 /*
  * Open loop voltage fuse reference voltages in microvolts for MSM8998 v1
  */
@@ -556,7 +699,7 @@
  * Open loop voltage fuse reference voltages in microvolts for SDM845
  */
 static const int
-sdm845_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+sdm845_v1_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			688000,
@@ -580,6 +723,33 @@
 	},
 };
 
+static const int
+sdm845_v2_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			688000,
+			812000,
+			828000,
+			952000,
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			688000,
+			812000,
+			828000,
+			952000,
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			 688000,
+			 812000,
+			 884000,
+			1000000,
+			1000000,
+		},
+	},
+};
+
 #define CPRH_KBSS_FUSE_STEP_VOLT		10000
 #define CPRH_SDM845_KBSS_FUSE_STEP_VOLT		8000
 #define CPRH_KBSS_VOLTAGE_FUSE_SIZE		6
@@ -880,9 +1050,11 @@
 		struct cprh_kbss_fuses *fuse)
 {
 	void __iomem *base = vreg->thread->ctrl->fuse_base;
+	bool is_v1 = (vreg->thread->ctrl->soc_revision == SDM845_V1_SOC_ID);
 	int i, cid, tid, rc;
 
-	rc = cpr3_read_fuse_param(base, sdm845_cpr_fusing_rev_param,
+	rc = cpr3_read_fuse_param(base, is_v1 ? sdm845_v1_cpr_fusing_rev_param
+					      : sdm845_v2_cpr_fusing_rev_param,
 				&fuse->cpr_fusing_rev);
 	if (rc) {
 		cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
@@ -895,8 +1067,9 @@
 	cid = vreg->thread->ctrl->ctrl_id;
 
 	for (i = 0; i < vreg->fuse_corner_count; i++) {
-		rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_init_voltage_param[cid][tid][i],
+		rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_init_voltage_param[cid][tid][i] :
+				sdm845_v2_kbss_init_voltage_param[cid][tid][i],
 				&fuse->init_voltage[i]);
 		if (rc) {
 			cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
@@ -904,8 +1077,9 @@
 			return rc;
 		}
 
-		rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_target_quot_param[cid][tid][i],
+		rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_target_quot_param[cid][tid][i] :
+				sdm845_v2_kbss_target_quot_param[cid][tid][i],
 				&fuse->target_quot[i]);
 		if (rc) {
 			cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
@@ -913,8 +1087,9 @@
 			return rc;
 		}
 
-		rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_ro_sel_param[cid][tid][i],
+		rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_ro_sel_param[cid][tid][i] :
+				sdm845_v2_kbss_ro_sel_param[cid][tid][i],
 				&fuse->ro_sel[i]);
 		if (rc) {
 			cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
@@ -922,8 +1097,9 @@
 			return rc;
 		}
 
-		rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_quot_offset_param[cid][tid][i],
+		rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_quot_offset_param[cid][tid][i] :
+				sdm845_v2_kbss_quot_offset_param[cid][tid][i],
 				&fuse->quot_offset[i]);
 		if (rc) {
 			cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
@@ -932,8 +1108,9 @@
 		}
 	}
 
-	rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_aging_init_quot_diff_param[cid],
+	rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_aging_init_quot_diff_param[cid] :
+				sdm845_v2_kbss_aging_init_quot_diff_param[cid],
 				&fuse->aging_init_quot_diff);
 	if (rc) {
 		cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
@@ -994,11 +1171,16 @@
 		fuse_corners = MSM8998_KBSS_FUSE_CORNERS;
 		break;
 	case SDM845_V1_SOC_ID:
+		fuse_corners = vreg->thread->ctrl->ctrl_id
+					== CPRH_KBSS_POWER_CLUSTER_ID
+				? SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS
+				: SDM845_V1_KBSS_PERF_CLUSTER_FUSE_CORNERS;
+		break;
 	case SDM845_V2_SOC_ID:
 		fuse_corners = vreg->thread->ctrl->ctrl_id
 					== CPRH_KBSS_POWER_CLUSTER_ID
 				? SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS
-				: SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS;
+				: SDM845_V2_KBSS_PERF_CLUSTER_FUSE_CORNERS;
 		break;
 	default:
 		cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
@@ -1156,10 +1338,14 @@
 		corner_name = cprh_msm8998_kbss_fuse_corner_name;
 		break;
 	case SDM845_V1_SOC_ID:
+		tid = cprh_kbss_get_thread_id(vreg->thread);
+		ref_volt = sdm845_v1_kbss_fuse_ref_volt[id][tid];
+		corner_name = cprh_sdm845_v1_kbss_fuse_corner_name[id];
+		break;
 	case SDM845_V2_SOC_ID:
 		tid = cprh_kbss_get_thread_id(vreg->thread);
-		ref_volt = sdm845_kbss_fuse_ref_volt[id][tid];
-		corner_name = cprh_sdm845_kbss_fuse_corner_name[id];
+		ref_volt = sdm845_v2_kbss_fuse_ref_volt[id][tid];
+		corner_name = cprh_sdm845_v2_kbss_fuse_corner_name[id];
 		break;
 	default:
 		cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
@@ -1744,8 +1930,13 @@
 			CPRH_MSM8998_KBSS_FUSE_CORNER_TURBO_L1;
 		break;
 	case SDM845_V1_SOC_ID:
+		corner_name = cprh_sdm845_v1_kbss_fuse_corner_name[
+						vreg->thread->ctrl->ctrl_id];
+		lowest_fuse_corner = 0;
+		highest_fuse_corner = vreg->fuse_corner_count - 1;
+		break;
 	case SDM845_V2_SOC_ID:
-		corner_name = cprh_sdm845_kbss_fuse_corner_name[
+		corner_name = cprh_sdm845_v2_kbss_fuse_corner_name[
 						vreg->thread->ctrl->ctrl_id];
 		lowest_fuse_corner = 0;
 		highest_fuse_corner = vreg->fuse_corner_count - 1;
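
The per-SoC fuse tables above are selected with repeated is_v1 ternaries in the read path. As a readability sketch only (not part of this change), the selection for one of the tables could be factored into a helper; both tables share the [cluster][thread][corner] layout:

static const struct cpr3_fuse_param *
sdm845_kbss_ro_sel(bool is_v1, int cid, int tid, int corner)
{
	/* Pick the v1 or v2 RO-select fuse location for this corner. */
	return is_v1 ? sdm845_v1_kbss_ro_sel_param[cid][tid][corner]
		     : sdm845_v2_kbss_ro_sel_param[cid][tid][corner];
}
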
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index fcb3731..68ddd1f 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -668,10 +668,6 @@
 
 		desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
 
-		pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
-			x0, desc->arginfo, desc->args[0], desc->args[1],
-			desc->args[2], desc->x5);
-
 		trace_scm_call_start(x0, desc);
 
 		if (scm_version == SCM_ARMV8_64)
@@ -701,10 +697,8 @@
 	}  while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
 
 	if (ret < 0)
-		pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
-			x0, desc->arginfo, desc->args[0], desc->args[1],
-			desc->args[2], desc->x5, ret, desc->ret[0],
-			desc->ret[1], desc->ret[2]);
+		pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+			x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
 
 	if (arglen > N_REGISTER_ARGS)
 		kfree(desc->extra_arg_buf);
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 49fd7fe..6553ac0 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -25,29 +25,12 @@
 
 DEFINE_MUTEX(secure_buffer_mutex);
 
-struct cp2_mem_chunks {
-	u32 chunk_list;
-	u32 chunk_list_size;
-	u32 chunk_size;
-} __attribute__ ((__packed__));
-
-struct cp2_lock_req {
-	struct cp2_mem_chunks chunks;
-	u32 mem_usage;
-	u32 lock;
-} __attribute__ ((__packed__));
-
-
 struct mem_prot_info {
 	phys_addr_t addr;
 	u64 size;
 };
 
 #define MEM_PROT_ASSIGN_ID		0x16
-#define MEM_PROTECT_LOCK_ID2		0x0A
-#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
-#define V2_CHUNK_SIZE		SZ_1M
-#define FEATURE_ID_CP 12
 
 struct dest_vm_and_perm_info {
 	u32 vm;
@@ -59,137 +42,6 @@
 static void *qcom_secure_mem;
 #define QCOM_SECURE_MEM_SIZE (512*1024)
 
-static int secure_buffer_change_chunk(u32 chunks,
-				u32 nchunks,
-				u32 chunk_size,
-				int lock)
-{
-	struct cp2_lock_req request;
-	u32 resp;
-	int ret;
-	struct scm_desc desc = {0};
-
-	desc.args[0] = request.chunks.chunk_list = chunks;
-	desc.args[1] = request.chunks.chunk_list_size = nchunks;
-	desc.args[2] = request.chunks.chunk_size = chunk_size;
-	/* Usage is now always 0 */
-	desc.args[3] = request.mem_usage = 0;
-	desc.args[4] = request.lock = lock;
-	desc.args[5] = 0;
-	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
-				SCM_VAL);
-
-	kmap_flush_unused();
-	kmap_atomic_flush_unused();
-
-	if (!is_scm_armv8()) {
-		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
-				&request, sizeof(request), &resp, sizeof(resp));
-	} else {
-		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
-				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
-		resp = desc.ret[0];
-	}
-
-	return ret;
-}
-
-
-
-static int secure_buffer_change_table(struct sg_table *table, int lock)
-{
-	int i, j;
-	int ret = -EINVAL;
-	u32 *chunk_list;
-	struct scatterlist *sg;
-
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		int nchunks;
-		int size = sg->length;
-		int chunk_list_len;
-		phys_addr_t chunk_list_phys;
-
-		/*
-		 * This should theoretically be a phys_addr_t but the protocol
-		 * indicates this should be a u32.
-		 */
-		u32 base;
-		u64 tmp = sg_dma_address(sg);
-
-		WARN((tmp >> 32) & 0xffffffff,
-			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
-			__func__, sg, tmp);
-		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
-			WARN(1,
-				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
-				__func__, i, size, V2_CHUNK_SIZE);
-			return -EINVAL;
-		}
-
-		base = (u32)tmp;
-
-		nchunks = size / V2_CHUNK_SIZE;
-		chunk_list_len = sizeof(u32)*nchunks;
-
-		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
-
-		if (!chunk_list)
-			return -ENOMEM;
-
-		chunk_list_phys = virt_to_phys(chunk_list);
-		for (j = 0; j < nchunks; j++)
-			chunk_list[j] = base + j * V2_CHUNK_SIZE;
-
-		/*
-		 * Flush the chunk list before sending the memory to the
-		 * secure environment to ensure the data is actually present
-		 * in RAM
-		 */
-		dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
-
-		ret = secure_buffer_change_chunk(virt_to_phys(chunk_list),
-				nchunks, V2_CHUNK_SIZE, lock);
-
-		if (!ret) {
-			/*
-			 * Set or clear the private page flag to communicate the
-			 * status of the chunk to other entities
-			 */
-			if (lock)
-				SetPagePrivate(sg_page(sg));
-			else
-				ClearPagePrivate(sg_page(sg));
-		}
-
-		kfree(chunk_list);
-	}
-
-	return ret;
-}
-
-int msm_secure_table(struct sg_table *table)
-{
-	int ret;
-
-	mutex_lock(&secure_buffer_mutex);
-	ret = secure_buffer_change_table(table, 1);
-	mutex_unlock(&secure_buffer_mutex);
-
-	return ret;
-
-}
-
-int msm_unsecure_table(struct sg_table *table)
-{
-	int ret;
-
-	mutex_lock(&secure_buffer_mutex);
-	ret = secure_buffer_change_table(table, 0);
-	mutex_unlock(&secure_buffer_mutex);
-	return ret;
-
-}
-
 static struct dest_vm_and_perm_info *
 populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
 		   size_t *size_in_bytes)
@@ -426,20 +278,6 @@
 	}
 }
 
-#define MAKE_CP_VERSION(major, minor, patch) \
-	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
-
-bool msm_secure_v2_is_supported(void)
-{
-	int version = scm_get_feat_version(FEATURE_ID_CP);
-
-	/*
-	 * if the version is < 1.1.0 then dynamic buffer allocation is
-	 * not supported
-	 */
-	return version >= MAKE_CP_VERSION(1, 1, 0);
-}
-
 static int __init alloc_secure_shared_memory(void)
 {
 	int ret = 0;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index ff6436f..ee8e8b6 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -249,6 +249,8 @@
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
 	source_nelems = count_set_bits(buffer->flags & ION_FLAGS_CP_MASK);
+	if (!source_nelems)
+		return;
 	source_vm_list = kcalloc(source_nelems, sizeof(*source_vm_list),
 				 GFP_KERNEL);
 	if (!source_vm_list)
@@ -291,6 +293,10 @@
 	source_vm = VMID_HLOS;
 
 	dest_nelems = count_set_bits(flags & ION_FLAGS_CP_MASK);
+	if (!dest_nelems) {
+		ret = -EINVAL;
+		goto out;
+	}
 	dest_vm_list = kcalloc(dest_nelems, sizeof(*dest_vm_list), GFP_KERNEL);
 	if (!dest_vm_list) {
 		ret = -ENOMEM;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index eef716b..3fd2b54 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -981,6 +981,15 @@
 		case USB_PTM_CAP_TYPE:
 			dev->bos->ptm_cap =
 				(struct usb_ptm_cap_descriptor *)buffer;
+			break;
+		case USB_CAP_TYPE_CONFIG_SUMMARY:
+			/* one such desc per configuration */
+			if (!dev->bos->num_config_summary_desc)
+				dev->bos->config_summary =
+				(struct usb_config_summary_descriptor *)buffer;
+
+			dev->bos->num_config_summary_desc++;
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8d..0f10ff2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -19,6 +19,8 @@
 
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v3.h>
 #include "usb.h"
 
 static inline const char *plural(int n)
@@ -40,6 +42,34 @@
 		&& desc->bInterfaceProtocol == 1;
 }
 
+static int usb_audio_max_rev_config(struct usb_host_bos *bos)
+{
+	int desc_cnt, func_cnt, numfunc;
+	int num_cfg_desc;
+	struct usb_config_summary_descriptor *conf_summary;
+
+	if (!bos || !bos->config_summary)
+		goto done;
+
+	conf_summary = bos->config_summary;
+	num_cfg_desc = bos->num_config_summary_desc;
+
+	for (desc_cnt = 0; desc_cnt < num_cfg_desc; desc_cnt++) {
+		numfunc = conf_summary->bNumFunctions;
+		for (func_cnt = 0; func_cnt < numfunc; func_cnt++) {
+			/* honor device preferred config */
+			if (conf_summary->cs_info[func_cnt].bClass ==
+				USB_CLASS_AUDIO &&
+				conf_summary->cs_info[func_cnt].bSubClass !=
+				FULL_ADC_3_0)
+				return conf_summary->bConfigurationValue;
+		}
+	}
+
+done:
+	return -EINVAL;
+}
+
 int usb_choose_configuration(struct usb_device *udev)
 {
 	int i;
@@ -130,7 +160,6 @@
 			best = c;
 			break;
 		}
-
 		/* If all the remaining configs are vendor-specific,
 		 * choose the first one. */
 		else if (!best)
@@ -143,7 +172,10 @@
 			insufficient_power, plural(insufficient_power));
 
 	if (best) {
-		i = best->desc.bConfigurationValue;
+		/* choose usb audio class preferred config if available */
+		i = usb_audio_max_rev_config(udev->bos);
+		if (i < 0)
+			i = best->desc.bConfigurationValue;
 		dev_dbg(&udev->dev,
 			"configuration #%d chosen from %d choice%s\n",
 			i, num_configs, plural(num_configs));
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 51ab794..1590927 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -1029,8 +1029,14 @@
 	config_group_init_type_name(&fi_audio->func_inst.group, "",
 						&audio_source_func_type);
 
-	snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+	if (!count) {
+		snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+					"f_audio_source");
+		count++;
+	} else {
+		snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
 					"f_audio_source%d", count++);
+	}
 
 	dev = create_function_device(device_name);
 
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 60da84a..5d75cc4 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -5,4 +5,4 @@
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
 
-fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o
+fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o passthrough.o
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 83511cb..658fa9e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -7,6 +7,7 @@
 */
 
 #include "fuse_i.h"
+#include "fuse_passthrough.h"
 
 #include <linux/init.h>
 #include <linux/module.h>
@@ -550,9 +551,14 @@
 	       args->out.numargs * sizeof(struct fuse_arg));
 	fuse_request_send(fc, req);
 	ret = req->out.h.error;
-	if (!ret && args->out.argvar) {
-		BUG_ON(args->out.numargs != 1);
-		ret = req->out.args[0].size;
+	if (!ret) {
+		if (args->out.argvar) {
+			WARN_ON(args->out.numargs != 1);
+			ret = req->out.args[0].size;
+		}
+
+		if (req->passthrough_filp != NULL)
+			args->out.passthrough_filp = req->passthrough_filp;
 	}
 	fuse_put_request(fc, req);
 
@@ -1890,6 +1896,9 @@
 	}
 	fuse_copy_finish(cs);
 
+	fuse_setup_passthrough(fc, req);
+
 	spin_lock(&fpq->lock);
 	clear_bit(FR_LOCKED, &req->flags);
 	if (!fpq->connected)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fc8ba62..c7c3c96 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -491,6 +491,7 @@
 	args.out.args[0].value = &outentry;
 	args.out.args[1].size = sizeof(outopen);
 	args.out.args[1].value = &outopen;
+	args.out.passthrough_filp = NULL;
 	err = fuse_simple_request(fc, &args);
 	if (err)
 		goto out_free_ff;
@@ -502,6 +503,8 @@
 	ff->fh = outopen.fh;
 	ff->nodeid = outentry.nodeid;
 	ff->open_flags = outopen.open_flags;
+	if (args.out.passthrough_filp != NULL)
+		ff->passthrough_filp = args.out.passthrough_filp;
 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
 			  &outentry.attr, entry_attr_timeout(&outentry), 0);
 	if (!inode) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 5ec5870..75c95659 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -7,6 +7,7 @@
 */
 
 #include "fuse_i.h"
+#include "fuse_passthrough.h"
 
 #include <linux/pagemap.h>
 #include <linux/slab.h>
@@ -21,8 +22,10 @@
 static const struct file_operations fuse_direct_io_file_operations;
 
 static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
-			  int opcode, struct fuse_open_out *outargp)
+			  int opcode, struct fuse_open_out *outargp,
+			  struct file **passthrough_filpp)
 {
+	int ret_val;
 	struct fuse_open_in inarg;
 	FUSE_ARGS(args);
 
@@ -38,8 +41,14 @@
 	args.out.numargs = 1;
 	args.out.args[0].size = sizeof(*outargp);
 	args.out.args[0].value = outargp;
+	args.out.passthrough_filp = NULL;
 
-	return fuse_simple_request(fc, &args);
+	ret_val = fuse_simple_request(fc, &args);
+
+	if (args.out.passthrough_filp != NULL)
+		*passthrough_filpp = args.out.passthrough_filp;
+
+	return ret_val;
 }
 
 struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
@@ -50,6 +59,11 @@
 	if (unlikely(!ff))
 		return NULL;
 
+	ff->passthrough_filp = NULL;
+	ff->passthrough_enabled = 0;
+	if (fc->passthrough)
+		ff->passthrough_enabled = 1;
+
 	ff->fc = fc;
 	ff->reserved_req = fuse_request_alloc(0);
 	if (unlikely(!ff->reserved_req)) {
@@ -118,6 +132,7 @@
 		 bool isdir)
 {
 	struct fuse_file *ff;
+	struct file *passthrough_filp = NULL;
 	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
 
 	ff = fuse_file_alloc(fc);
@@ -130,11 +145,12 @@
 		struct fuse_open_out outarg;
 		int err;
 
-		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
+		err = fuse_send_open(fc, nodeid, file, opcode, &outarg,
+				     &(passthrough_filp));
 		if (!err) {
 			ff->fh = outarg.fh;
 			ff->open_flags = outarg.open_flags;
-
+			ff->passthrough_filp = passthrough_filp;
 		} else if (err != -ENOSYS || isdir) {
 			fuse_file_free(ff);
 			return err;
@@ -253,6 +269,8 @@
 	if (unlikely(!ff))
 		return;
 
+	fuse_passthrough_release(ff);
+
 	req = ff->reserved_req;
 	fuse_prepare_release(ff, file->f_flags, opcode);
 
@@ -917,8 +935,10 @@
 
 static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+	ssize_t ret_val;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_file *ff = iocb->ki_filp->private_data;
 
 	/*
 	 * In auto invalidate mode, always update attributes on read.
@@ -933,7 +953,12 @@
 			return err;
 	}
 
-	return generic_file_read_iter(iocb, to);
+	if (ff && ff->passthrough_enabled && ff->passthrough_filp)
+		ret_val = fuse_passthrough_read_iter(iocb, to);
+	else
+		ret_val = generic_file_read_iter(iocb, to);
+
+	return ret_val;
 }
 
 static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
@@ -1165,6 +1190,7 @@
 static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
+	struct fuse_file *ff = file->private_data;
 	struct address_space *mapping = file->f_mapping;
 	ssize_t written = 0;
 	ssize_t written_buffered = 0;
@@ -1198,6 +1224,11 @@
 	if (err)
 		goto out;
 
+	if (ff && ff->passthrough_enabled && ff->passthrough_filp) {
+		written = fuse_passthrough_write_iter(iocb, from);
+		goto out;
+	}
+
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		loff_t pos = iocb->ki_pos;
 		written = generic_file_direct_write(iocb, from);
@@ -2069,6 +2100,9 @@
 
 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct fuse_file *ff = file->private_data;
+
+	ff->passthrough_enabled = 0;
 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
 		fuse_link_write_file(file);
 
@@ -2079,6 +2113,9 @@
 
 static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct fuse_file *ff = file->private_data;
+
+	ff->passthrough_enabled = 0;
 	/* Can't provide the coherency needed for MAP_SHARED */
 	if (vma->vm_flags & VM_MAYSHARE)
 		return -ENODEV;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 6b30a12..cc2c82c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -153,6 +153,10 @@
 
 	/** Has flock been performed on this file? */
 	bool flock:1;
+
+	/** Backing file used for passthrough reads and writes */
+	struct file *passthrough_filp;
+	bool passthrough_enabled;
 };
 
 /** One input argument of a request */
@@ -232,6 +236,7 @@
 		unsigned argvar:1;
 		unsigned numargs;
 		struct fuse_arg args[2];
+		struct file *passthrough_filp;
 	} out;
 };
 
@@ -382,6 +387,9 @@
 
 	/** Request is stolen from fuse_file->reserved_req */
 	struct file *stolen_file;
+
+	/** fuse passthrough file  */
+	struct file *passthrough_filp;
 };
 
 struct fuse_iqueue {
@@ -542,6 +550,9 @@
 	/** handle fs handles killing suid/sgid/cap on write/chown/trunc */
 	unsigned handle_killpriv:1;
 
+	/** Is passthrough I/O to a backing file supported? */
+	unsigned passthrough:1;
+
 	/*
 	 * The following bitfields are only for optimization purposes
 	 * and hence races in setting them will not cause malfunction
diff --git a/fs/fuse/fuse_passthrough.h b/fs/fuse/fuse_passthrough.h
new file mode 100644
index 0000000..12429ac
--- /dev/null
+++ b/fs/fuse/fuse_passthrough.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FS_FUSE_PASSTHROUGH_H
+#define _FS_FUSE_PASSTHROUGH_H
+
+#include "fuse_i.h"
+
+#include <linux/fuse.h>
+#include <linux/file.h>
+
+void fuse_setup_passthrough(struct fuse_conn *fc, struct fuse_req *req);
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to);
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+void fuse_passthrough_release(struct fuse_file *ff);
+
+#endif /* _FS_FUSE_PASSTHROUGH_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6fe6a88..f1512c8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -915,6 +915,12 @@
 				fc->parallel_dirops = 1;
 			if (arg->flags & FUSE_HANDLE_KILLPRIV)
 				fc->handle_killpriv = 1;
+			if (arg->flags & FUSE_PASSTHROUGH) {
+				fc->passthrough = 1;
+				/* Prevent further stacking */
+				fc->sb->s_stack_depth =
+					FILESYSTEM_MAX_STACK_DEPTH;
+			}
 			if (arg->time_gran && arg->time_gran <= 1000000000)
 				fc->sb->s_time_gran = arg->time_gran;
 			if ((arg->flags & FUSE_POSIX_ACL)) {
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
new file mode 100644
index 0000000..c92c40b
--- /dev/null
+++ b/fs/fuse/passthrough.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "fuse_passthrough.h"
+
+#include <linux/aio.h>
+#include <linux/fs_stack.h>
+
+void fuse_setup_passthrough(struct fuse_conn *fc, struct fuse_req *req)
+{
+	int daemon_fd, fs_stack_depth;
+	unsigned int open_out_index;
+	struct file *passthrough_filp;
+	struct inode *passthrough_inode;
+	struct super_block *passthrough_sb;
+	struct fuse_open_out *open_out;
+
+	req->passthrough_filp = NULL;
+
+	if (!(fc->passthrough))
+		return;
+
+	if ((req->in.h.opcode != FUSE_OPEN) &&
+	    (req->in.h.opcode != FUSE_CREATE))
+		return;
+
+	open_out_index = req->in.numargs - 1;
+
+	WARN_ON(open_out_index != 0 && open_out_index != 1);
+	WARN_ON(req->out.args[open_out_index].size != sizeof(*open_out));
+
+	open_out = req->out.args[open_out_index].value;
+
+	daemon_fd = (int)open_out->passthrough_fd;
+	if (daemon_fd < 0)
+		return;
+
+	passthrough_filp = fget_raw(daemon_fd);
+	if (!passthrough_filp)
+		return;
+
+	passthrough_inode = file_inode(passthrough_filp);
+	passthrough_sb = passthrough_inode->i_sb;
+	fs_stack_depth = passthrough_sb->s_stack_depth + 1;
+
+	/* If we reached the stacking limit, fall back to regular I/O */
+	if (fs_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+		/* Release the passthrough file. */
+		fput(passthrough_filp);
+		pr_err("FUSE: maximum fs stacking depth exceeded, cannot use passthrough for this file\n");
+		return;
+	}
+	req->passthrough_filp = passthrough_filp;
+}
+
+
+static ssize_t fuse_passthrough_read_write_iter(struct kiocb *iocb,
+					    struct iov_iter *iter, int do_write)
+{
+	ssize_t ret_val;
+	struct fuse_file *ff;
+	struct file *fuse_file, *passthrough_filp;
+	struct inode *fuse_inode, *passthrough_inode;
+	struct fuse_conn *fc;
+
+	ff = iocb->ki_filp->private_data;
+	fuse_file = iocb->ki_filp;
+	passthrough_filp = ff->passthrough_filp;
+	fc = ff->fc;
+
+	/* lock passthrough file to prevent it from being released */
+	get_file(passthrough_filp);
+	iocb->ki_filp = passthrough_filp;
+	fuse_inode = fuse_file->f_path.dentry->d_inode;
+	passthrough_inode = file_inode(passthrough_filp);
+
+	if (do_write) {
+		if (!passthrough_filp->f_op->write_iter) {
+			ret_val = -EIO;
+			goto out;
+		}
+
+		ret_val = passthrough_filp->f_op->write_iter(iocb, iter);
+
+		if (ret_val >= 0 || ret_val == -EIOCBQUEUED) {
+			spin_lock(&fc->lock);
+			fsstack_copy_inode_size(fuse_inode, passthrough_inode);
+			spin_unlock(&fc->lock);
+			fsstack_copy_attr_times(fuse_inode, passthrough_inode);
+		}
+	} else {
+		if (!passthrough_filp->f_op->read_iter) {
+			ret_val = -EIO;
+			goto out;
+		}
+
+		ret_val = passthrough_filp->f_op->read_iter(iocb, iter);
+		if (ret_val >= 0 || ret_val == -EIOCBQUEUED)
+			fsstack_copy_attr_atime(fuse_inode, passthrough_inode);
+	}
+
+out:
+	iocb->ki_filp = fuse_file;
+
+	/* unlock passthrough file */
+	fput(passthrough_filp);
+
+	return ret_val;
+}
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	return fuse_passthrough_read_write_iter(iocb, to, 0);
+}
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	return fuse_passthrough_read_write_iter(iocb, from, 1);
+}
+
+void fuse_passthrough_release(struct fuse_file *ff)
+{
+	if (!(ff->passthrough_filp))
+		return;
+
+	/* Release the passthrough file. */
+	fput(ff->passthrough_filp);
+	ff->passthrough_filp = NULL;
+}
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 0e2743f..aa2b4e4 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1718,6 +1718,9 @@
 	WLAN_STATUS_REJECT_DSE_BAND = 96,
 	WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
 	WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+	/* 802.11ai */
+	WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
+	WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
 };
 
 
@@ -2079,6 +2082,12 @@
 #define FILS_NONCE_LEN			16
 #define FILS_MAX_KEK_LEN		64
 
+#define FILS_ERP_MAX_USERNAME_LEN	16
+#define FILS_ERP_MAX_REALM_LEN		253
+#define FILS_ERP_MAX_RRK_LEN		64
+
+#define PMK_MAX_LEN			48
+
 /* Public action codes */
 enum ieee80211_pub_actioncode {
 	WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
@@ -2329,6 +2338,10 @@
 #define WLAN_AKM_SUITE_TDLS		SUITE(0x000FAC, 7)
 #define WLAN_AKM_SUITE_SAE		SUITE(0x000FAC, 8)
 #define WLAN_AKM_SUITE_FT_OVER_SAE	SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_FILS_SHA256	SUITE(0x000FAC, 14)
+#define WLAN_AKM_SUITE_FILS_SHA384	SUITE(0x000FAC, 15)
+#define WLAN_AKM_SUITE_FT_FILS_SHA256	SUITE(0x000FAC, 16)
+#define WLAN_AKM_SUITE_FT_FILS_SHA384	SUITE(0x000FAC, 17)
 
 #define WLAN_MAX_KEY_LEN		32
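
A small hedged sketch (the helper name is illustrative only) of how a driver might test for the new FILS AKM suite selectors added above:

static inline bool example_akm_is_fils(u32 akm_suite)
{
	return akm_suite == WLAN_AKM_SUITE_FILS_SHA256 ||
	       akm_suite == WLAN_AKM_SUITE_FILS_SHA384 ||
	       akm_suite == WLAN_AKM_SUITE_FT_FILS_SHA256 ||
	       akm_suite == WLAN_AKM_SUITE_FT_FILS_SHA384;
}
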
 
diff --git a/include/linux/ipa_odu_bridge.h b/include/linux/ipa_odu_bridge.h
index 5d30a97..e7f75b7 100644
--- a/include/linux/ipa_odu_bridge.h
+++ b/include/linux/ipa_odu_bridge.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,7 +39,85 @@
 	u32 ipa_desc_size;
 };
 
-#if defined CONFIG_IPA || defined CONFIG_IPA3
+/**
+ * struct ipa_bridge_init_params - parameters for IPA bridge initialization API
+ *
+ * @info: structure containing the initialization information
+ * @wakeup_request: callback to client to indicate there is downlink data
+ *	available. Client is expected to call ipa_bridge_resume() to start
+ *	receiving data
+ */
+struct ipa_bridge_init_params {
+	struct odu_bridge_params info;
+	void (*wakeup_request)(void *);
+};
+
+#ifdef CONFIG_IPA3
+
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl);
+
+int ipa_bridge_connect(u32 hdl);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth);
+
+int ipa_bridge_disconnect(u32 hdl);
+
+int ipa_bridge_suspend(u32 hdl);
+
+int ipa_bridge_resume(u32 hdl);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata);
+
+int ipa_bridge_cleanup(u32 hdl);
+
+#else
+
+static inline int ipa_bridge_init(struct ipa_bridge_init_params *params,
+	u32 *hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_connect(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_disconnect(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_suspend(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_resume(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_cleanup(u32 hdl)
+{
+	return -EPERM;
+}
+
+#endif /* CONFIG_IPA3 */
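
For reference, a hedged client-side sketch of the new bridge API; the my_*/example_* names are assumptions and not part of this change:

static void my_wakeup_cb(void *priv)
{
	/* A real client would call ipa_bridge_resume() from its own context. */
}

static int example_bridge_start(struct odu_bridge_params *info, u32 *hdl)
{
	struct ipa_bridge_init_params p = {
		.info = *info,
		.wakeup_request = my_wakeup_cb,
	};
	int rc;

	rc = ipa_bridge_init(&p, hdl);
	if (rc)
		return rc;

	return ipa_bridge_connect(*hdl);
}
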
+
+/* The API below is deprecated. Please use the API above. */
+#if defined CONFIG_IPA || defined CONFIG_IPA3
 
 int odu_bridge_init(struct odu_bridge_params *params);
 
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 2a663c6..b01d294 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -1,6 +1,7 @@
 #ifndef _LIBFDT_ENV_H
 #define _LIBFDT_ENV_H
 
+#include <linux/kernel.h>
 #include <linux/string.h>
 
 #include <asm/byteorder.h>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7627c76..779c5c4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3882,6 +3882,7 @@
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
 #define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
 #define SCHED_CPUFREQ_WALT (1U << 4)
+#define SCHED_CPUFREQ_PL	(1U << 5)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 1f39661..0f9fff3 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -333,6 +333,8 @@
 	struct usb_ssp_cap_descriptor	*ssp_cap;
 	struct usb_ss_container_id_descriptor	*ss_id;
 	struct usb_ptm_cap_descriptor	*ptm_cap;
+	struct usb_config_summary_descriptor	*config_summary;
+	unsigned int	num_config_summary_desc;
 };
 
 int __usb_get_extra_descriptor(char *buffer, unsigned size,
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index f2322f3..b6cc17b 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -50,7 +50,8 @@
 #define CLUSTER_ID_MONO		0x0001
 #define CLUSTER_ID_STEREO	0x0002
 
-#define FULL_ADC_PROFILE	0x01
+/* A.2 audio function subclass codes */
+#define FULL_ADC_3_0		0x01
 
 /* BADD Profile IDs */
 #define PROF_GENERIC_IO		0x20
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cf65cbe..b99b80a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -39,6 +39,12 @@
 /* backport support for specifying reason for connect timeout */
 #define CFG80211_CONNECT_TIMEOUT_REASON_CODE 1
 
+/* Indicate backport support for the new connect done api */
+#define CFG80211_CONNECT_DONE 1
+
+/* Indicate backport support for FILS SK offload in cfg80211 */
+#define CFG80211_FILS_SK_OFFLOAD_SUPPORT 1
+
 /**
  * DOC: Introduction
  *
@@ -2052,6 +2058,19 @@
  *	the BSSID of the current association, i.e., to the value that is
  *	included in the Current AP address field of the Reassociation Request
  *	frame.
+ * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the
+ *	NAI or %NULL if not specified. This is used to construct FILS wrapped
+ *	data IE.
+ * @fils_erp_username_len: Length of @fils_erp_username in octets.
+ * @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or
+ *	%NULL if not specified. This specifies the domain name of ER server and
+ *	%NULL if not specified. This specifies the domain name of the ER server and
+ * @fils_erp_realm_len: Length of @fils_erp_realm in octets.
+ * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP
+ *	messages. This is also used to construct FILS wrapped data IE.
+ * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
+ *	keys in FILS or %NULL if not specified.
+ * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
  */
 struct cfg80211_connect_params {
 	struct ieee80211_channel *channel;
@@ -2077,6 +2096,13 @@
 	bool pbss;
 	struct cfg80211_bss_selection bss_select;
 	const u8 *prev_bssid;
+	const u8 *fils_erp_username;
+	size_t fils_erp_username_len;
+	const u8 *fils_erp_realm;
+	size_t fils_erp_realm_len;
+	u16 fils_erp_next_seq_num;
+	const u8 *fils_erp_rrk;
+	size_t fils_erp_rrk_len;
 };
 
 /**
@@ -2115,12 +2141,27 @@
  * This structure is passed to the set/del_pmksa() method for PMKSA
  * caching.
  *
- * @bssid: The AP's BSSID.
- * @pmkid: The PMK material itself.
+ * @bssid: The AP's BSSID (may be %NULL).
+ * @pmkid: The identifier to refer a PMKSA.
+ * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
+ *	derivation by a FILS STA. Otherwise, %NULL.
+ * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
+ *	the hash algorithm used to generate this.
+ * @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS
+ *	cache identifier (may be %NULL).
+ * @ssid_len: Length of the @ssid in octets.
+ * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
+ *	scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
+ *	%NULL).
  */
 struct cfg80211_pmksa {
 	const u8 *bssid;
 	const u8 *pmkid;
+	const u8 *pmk;
+	size_t pmk_len;
+	const u8 *ssid;
+	size_t ssid_len;
+	const u8 *cache_id;
 };
 
 /**
@@ -5064,6 +5105,78 @@
 #endif
 
 /**
+ * struct cfg80211_connect_resp_params - Connection response params
+ * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ *	%WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ *	the real status code for failures. If this call is used to report a
+ *	failure due to a timeout (e.g., not receiving an Authentication frame
+ *	from the AP) instead of an explicit rejection by the AP, -1 is used to
+ *	indicate that this is a failure, but without a status code.
+ *	@timeout_reason is used to report the reason for the timeout in that
+ *	case.
+ * @bssid: The BSSID of the AP (may be %NULL)
+ * @bss: Entry of the bss to which the STA got connected; obtained through
+ *	cfg80211_get_bss() (may be %NULL). Only one parameter among @bssid and
+ *	@bss needs to be specified.
+ * @req_ie: Association request IEs (may be %NULL)
+ * @req_ie_len: Association request IEs length
+ * @resp_ie: Association response IEs (may be %NULL)
+ * @resp_ie_len: Association response IEs length
+ * @fils_kek: KEK derived from a successful FILS connection (may be %NULL)
+ * @fils_kek_len: Length of @fils_kek in octets
+ * @update_erp_next_seq_num: Boolean value to specify whether the value in
+ *	@fils_erp_next_seq_num is valid.
+ * @fils_erp_next_seq_num: The next sequence number to use in ERP message in
+ *	FILS Authentication. This value should be specified irrespective of the
+ *	status for a FILS connection.
+ * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
+ * @pmk_len: Length of @pmk in octets
+ * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
+ *	used for this FILS connection (may be %NULL).
+ * @timeout_reason: Reason for connection timeout. This is used when the
+ *	connection fails due to a timeout instead of an explicit rejection from
+ *	the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ *	not known. This value is used only if @status < 0 to indicate that the
+ *	failure is due to a timeout and not due to explicit rejection by the AP.
+ *	This value is ignored in other cases (@status >= 0).
+ */
+struct cfg80211_connect_resp_params {
+	int status;
+	const u8 *bssid;
+	struct cfg80211_bss *bss;
+	const u8 *req_ie;
+	size_t req_ie_len;
+	const u8 *resp_ie;
+	size_t resp_ie_len;
+	const u8 *fils_kek;
+	size_t fils_kek_len;
+	bool update_erp_next_seq_num;
+	u16 fils_erp_next_seq_num;
+	const u8 *pmk;
+	size_t pmk_len;
+	const u8 *pmkid;
+	enum nl80211_timeout_reason timeout_reason;
+};
+
+/**
+ * cfg80211_connect_done - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @params: connection response parameters
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss(), but takes a structure pointer for connection response
+ * parameters. Only one of the functions among cfg80211_connect_bss(),
+ * cfg80211_connect_result(), cfg80211_connect_timeout(),
+ * and cfg80211_connect_done() should be called.
+ */
+void cfg80211_connect_done(struct net_device *dev,
+			   struct cfg80211_connect_resp_params *params,
+			   gfp_t gfp);
+
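As a hedged driver-side sketch (the function name, device pointer, and IE buffers are assumptions), a successful non-FILS connection could be reported through the new API as follows; fields not set default to zero/%NULL:

static void example_report_connect(struct net_device *ndev, const u8 *bssid,
				   const u8 *req_ie, size_t req_ie_len,
				   const u8 *resp_ie, size_t resp_ie_len)
{
	struct cfg80211_connect_resp_params params = {
		.status = WLAN_STATUS_SUCCESS,
		.bssid = bssid,
		.req_ie = req_ie,
		.req_ie_len = req_ie_len,
		.resp_ie = resp_ie,
		.resp_ie_len = resp_ie_len,
		.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED,
	};

	cfg80211_connect_done(ndev, &params, GFP_KERNEL);
}
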
+/**
  * cfg80211_connect_bss - notify cfg80211 of connection result
  *
  * @dev: network device
@@ -5093,13 +5206,31 @@
  * It should be called by the underlying driver once execution of the connection
  * request from connect() has been completed. This is similar to
  * cfg80211_connect_result(), but with the option of identifying the exact bss
- * entry for the connection. Only one of these functions should be called.
+ * entry for the connection. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
  */
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
-			  struct cfg80211_bss *bss, const u8 *req_ie,
-			  size_t req_ie_len, const u8 *resp_ie,
-			  size_t resp_ie_len, int status, gfp_t gfp,
-			  enum nl80211_timeout_reason timeout_reason);
+static inline void
+cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+		     struct cfg80211_bss *bss, const u8 *req_ie,
+		     size_t req_ie_len, const u8 *resp_ie,
+		     size_t resp_ie_len, int status, gfp_t gfp,
+		     enum nl80211_timeout_reason timeout_reason)
+{
+	struct cfg80211_connect_resp_params params;
+
+	memset(&params, 0, sizeof(params));
+	params.status = status;
+	params.bssid = bssid;
+	params.bss = bss;
+	params.req_ie = req_ie;
+	params.req_ie_len = req_ie_len;
+	params.resp_ie = resp_ie;
+	params.resp_ie_len = resp_ie_len;
+	params.timeout_reason = timeout_reason;
+
+	cfg80211_connect_done(dev, &params, gfp);
+}
 
 /**
  * cfg80211_connect_result - notify cfg80211 of connection result
@@ -5118,7 +5249,8 @@
  * It should be called by the underlying driver once execution of the connection
  * request from connect() has been completed. This is similar to
  * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
- * one of these functions should be called.
+ * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
  */
 static inline void
 cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -5145,7 +5277,9 @@
  * in a sequence where no explicit authentication/association rejection was
  * received from the AP. This could happen, e.g., due to not being able to send
  * out the Authentication or Association Request frame or timing out while
- * waiting for the response.
+ * waiting for the response. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
  */
 static inline void
 cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 665708d..12fa374 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -50,8 +50,6 @@
 #define PERM_EXEC			0x1
 
 #ifdef CONFIG_QCOM_SECURE_BUFFER
-int msm_secure_table(struct sg_table *table);
-int msm_unsecure_table(struct sg_table *table);
 int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -59,17 +57,8 @@
 extern int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems);
-bool msm_secure_v2_is_supported(void);
 const char *msm_secure_vmid_to_string(int secure_vmid);
 #else
-static inline int msm_secure_table(struct sg_table *table)
-{
-	return -EINVAL;
-}
-static inline int msm_unsecure_table(struct sg_table *table)
-{
-	return -EINVAL;
-}
 static inline int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -85,10 +74,6 @@
 	return -EINVAL;
 }
 
-static inline bool msm_secure_v2_is_supported(void)
-{
-	return false;
-}
 static inline const char *msm_secure_vmid_to_string(int secure_vmid)
 {
 	return "N/A";
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 0932378..e645f17 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -266,6 +266,7 @@
 #define FUSE_PARALLEL_DIROPS    (1 << 18)
 #define FUSE_HANDLE_KILLPRIV	(1 << 19)
 #define FUSE_POSIX_ACL		(1 << 20)
+#define FUSE_PASSTHROUGH	(1 << 21)
 
 /**
  * CUSE INIT request/reply flags
@@ -498,7 +499,7 @@
 struct fuse_open_out {
 	uint64_t	fh;
 	uint32_t	open_flags;
-	uint32_t	padding;
+	int32_t		passthrough_fd;
 };
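
A hedged userspace sketch of the protocol change above (the fh and backing_fd values are assumptions): a daemon that set FUSE_PASSTHROUGH in its FUSE_INIT reply flags can then return a real file descriptor in the open reply, or a negative value to keep the regular I/O path:

static void example_fill_open_reply(struct fuse_open_out *open_out,
				    uint64_t fh, int backing_fd)
{
	open_out->fh = fh;			/* daemon-side handle */
	open_out->open_flags = 0;
	/* A valid fd enables kernel passthrough; a negative value disables it. */
	open_out->passthrough_fd = backing_fd;
}
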
 
 struct fuse_release_in {
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index b0d8b93..8c0fc7b 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -173,6 +173,42 @@
  */
 
 /**
+ * DOC: FILS shared key authentication offload
+ *
+ * FILS shared key authentication offload can be advertised by drivers by
+ * setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support
+ * FILS shared key authentication offload should be able to construct the
+ * authentication and association frames for FILS shared key authentication and
+ * eventually do a key derivation as per IEEE 802.11ai. The additional
+ * parameters below should be given to the driver in %NL80211_CMD_CONNECT.
+ *	%NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
+ *	%NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
+ *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct erp message
+ *	%NL80211_ATTR_FILS_ERP_RRK - used to generate the rIK and rMSK
+ * rIK should be used to generate an authentication tag on the ERP message and
+ * rMSK should be used to derive a PMKSA.
+ * rIK, rMSK should be generated and keyname_nai, sequence number should be used
+ * as specified in IETF RFC 6696.
+ *
+ * When FILS shared key authentication is completed, the driver needs to
+ * provide the additional parameters below to userspace.
+ *	%NL80211_ATTR_FILS_KEK - used for key renewal
+ *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
+ *	%NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
+ *	%NL80211_ATTR_PMK - used to update the PMKSA cache in userspace
+ * The PMKSA can be maintained persistently in userspace so that it can be
+ * reused later, e.g., across reboots or Wi-Fi off/on cycles.
+ *
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a FILS
+ * capable AP supporting PMK caching. It specifies the scope within which the
+ * PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
+ * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
+ * on FILS cache identifier. Additionally %NL80211_ATTR_PMK is used with
+ * %NL80211_CMD_SET_PMKSA to specify the PMK corresponding to a PMKSA for the
+ * driver to use in a FILS shared key connection with PMKSA caching.
+ */
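
A hedged kernel-side sketch (the setup function name is illustrative): a driver implementing this offload advertises it via the extended feature flag so userspace may include the FILS ERP attributes in %NL80211_CMD_CONNECT:

static void example_wiphy_setup(struct wiphy *wiphy)
{
	/* Advertise FILS shared key authentication offload support. */
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD);
}
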
+
+/**
  * enum nl80211_commands - supported nl80211 commands
  *
  * @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -370,10 +406,18 @@
  * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
  *	NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
  *
- * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
- *	(for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry using %NL80211_ATTR_MAC
+ *	(for the BSSID), %NL80211_ATTR_PMKID, and optionally %NL80211_ATTR_PMK
+ *	(PMK is used for PTKSA derivation in case of FILS shared key offload) or
+ *	using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
+ *	%NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
+ *	authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
+ *	advertised by a FILS capable AP identifying the scope of PMKSA in an
+ *	ESS.
  * @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
- *	(for the BSSID) and %NL80211_ATTR_PMKID.
+ *	(for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
+ *	%NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
+ *	authentication.
  * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
  *
  * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -1998,6 +2042,31 @@
  *	u32 attribute with an &enum nl80211_timeout_reason value. This is used,
  *	e.g., with %NL80211_CMD_CONNECT event.
  *
+ * @NL80211_ATTR_FILS_ERP_USERNAME: EAP Re-authentication Protocol (ERP)
+ *	username part of NAI used to refer keys rRK and rIK. This is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_REALM: EAP Re-authentication Protocol (ERP) realm part
+ *	of NAI specifying the domain name of the ER server. This is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM: Unsigned 16-bit ERP next sequence number
+ *	to use in ERP messages. This is used in generating the FILS wrapped data
+ *	for FILS authentication and is used with %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_RRK: ERP re-authentication Root Key (rRK) for the
+ *	NAI specified by %NL80211_ATTR_FILS_ERP_USERNAME and
+ *	%NL80211_ATTR_FILS_ERP_REALM. This is used for generating rIK and rMSK
+ *	from successful FILS authentication and is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertized by a FILS AP
+ *	identifying the scope of PMKSAs. This is used with
+ *	@NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
+ *
+ * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
+ *	This is used with @NL80211_CMD_SET_PMKSA.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2409,6 +2478,14 @@
 
 	NL80211_ATTR_TIMEOUT_REASON,
 
+	NL80211_ATTR_FILS_ERP_USERNAME,
+	NL80211_ATTR_FILS_ERP_REALM,
+	NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+	NL80211_ATTR_FILS_ERP_RRK,
+	NL80211_ATTR_FILS_CACHE_ID,
+
+	NL80211_ATTR_PMK,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -4736,6 +4813,11 @@
  * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
  *	for reporting BSSs with better RSSI than the current connected BSS
  *	(%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
+ * @NL80211_EXT_FEATURE_CQM_RSSI_LIST: With this driver the
+ *	%NL80211_ATTR_CQM_RSSI_THOLD attribute accepts a list of zero or more
+ *	RSSI threshold values to monitor rather than exactly one threshold.
+ * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
+ *	authentication with %NL80211_CMD_CONNECT.
  *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4754,6 +4836,8 @@
 	NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
 	NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
 	NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
+	NL80211_EXT_FEATURE_CQM_RSSI_LIST,
+	NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
 
 	/* add new features before the definition below */
 	NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index a8acc24..0e5ce0d 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -1051,6 +1051,30 @@
  */
 #define USB_DT_USB_SSP_CAP_SIZE(ssac)	(16 + ssac * 4)
 
+/*
+ * Configuration Summary descriptors: Defines a list of functions in the
+ * configuration. This descriptor may be used by Host software to decide
+ * which Configuration to use to obtain the desired functionality.
+ */
+#define	USB_CAP_TYPE_CONFIG_SUMMARY	0x10
+
+struct function_class_info {
+	__u8 bClass;
+	__u8 bSubClass;
+	__u8 bProtocol;
+};
+
+struct usb_config_summary_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDevCapabilityType;
+	__u16 bcdVersion;
+	__u8 bConfigurationValue;
+	__u8 bMaxPower;
+	__u8 bNumFunctions;
+	struct function_class_info cs_info[];
+} __attribute__((packed));
+
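For illustration, the sketch below shows how host software might scan the cs_info[] array of a Configuration Summary descriptor for a desired (class, subclass, protocol) triple. The helper name and its use are assumptions for this sketch, not part of the patch, and it presumes <linux/types.h> plus the structures above are in scope.

/*
 * Illustrative only: report whether a Configuration Summary descriptor
 * advertises a function with the given class/subclass/protocol.
 */
static int config_summary_has_function(
		const struct usb_config_summary_descriptor *desc,
		__u8 want_class, __u8 want_subclass, __u8 want_protocol)
{
	int i;

	for (i = 0; i < desc->bNumFunctions; i++) {
		const struct function_class_info *fi = &desc->cs_info[i];

		if (fi->bClass == want_class &&
		    fi->bSubClass == want_subclass &&
		    fi->bProtocol == want_protocol)
			return 1;
	}

	return 0;
}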
 /*-------------------------------------------------------------------------*/
 
 /* USB_DT_WIRELESS_ENDPOINT_COMP:  companion descriptor associated with
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 98844ac..4ded0a4 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -4,6 +4,7 @@
 header-y += cam_isp.h
 header-y += cam_isp_vfe.h
 header-y += cam_isp_ife.h
+header-y += cam_jpeg.h
 header-y += cam_req_mgr.h
 header-y += cam_sensor.h
 header-y += cam_sync.h
diff --git a/include/uapi/media/cam_jpeg.h b/include/uapi/media/cam_jpeg.h
new file mode 100644
index 0000000..f3082f3
--- /dev/null
+++ b/include/uapi/media/cam_jpeg.h
@@ -0,0 +1,117 @@
+#ifndef __UAPI_CAM_JPEG_H__
+#define __UAPI_CAM_JPEG_H__
+
+#include "cam_defs.h"
+
+/* enc, dma, cdm(enc/dma) are used in querycap */
+#define CAM_JPEG_DEV_TYPE_ENC      0
+#define CAM_JPEG_DEV_TYPE_DMA      1
+#define CAM_JPEG_DEV_TYPE_MAX      2
+
+#define CAM_JPEG_NUM_DEV_PER_RES_MAX      1
+
+/* definitions needed for jpeg acquire device */
+#define CAM_JPEG_RES_TYPE_ENC        0
+#define CAM_JPEG_RES_TYPE_DMA        1
+#define CAM_JPEG_RES_TYPE_MAX        2
+
+/* packet opcode types */
+#define CAM_JPEG_OPCODE_ENC_UPDATE 0
+#define CAM_JPEG_OPCODE_DMA_UPDATE 1
+
+/* ENC input port resource type */
+#define CAM_JPEG_ENC_INPUT_IMAGE                 0x0
+
+/* ENC output port resource type */
+#define CAM_JPEG_ENC_OUTPUT_IMAGE                0x1
+
+#define CAM_JPEG_ENC_IO_IMAGES_MAX               0x2
+
+/* DMA input port resource type */
+#define CAM_JPEG_DMA_INPUT_IMAGE                 0x0
+
+/* DMA output port resource type */
+#define CAM_JPEG_DMA_OUTPUT_IMAGE                0x1
+
+#define CAM_JPEG_DMA_IO_IMAGES_MAX               0x2
+
+#define CAM_JPEG_IMAGE_MAX                       0x2
+
+/**
+ * struct cam_jpeg_dev_ver - Device information for a particular hw type
+ *
+ * This is used to get the device version info of JPEG ENC and JPEG DMA
+ * from hardware and to report it through the CAM_QUERY_CAP IOCTL
+ *
+ * @size : Size of struct passed
+ * @dev_type: Hardware type for the cap info (jpeg enc, jpeg dma)
+ * @hw_ver: Major, minor and incr values of a device version
+ */
+struct cam_jpeg_dev_ver {
+	uint32_t size;
+	uint32_t dev_type;
+	struct cam_hw_version hw_ver;
+};
+
+/**
+ * struct cam_jpeg_query_cap_cmd - JPEG query device capability payload
+ *
+ * @dev_iommu_handle: Jpeg iommu handles for secure/non secure
+ *      modes
+ * @cdm_iommu_handle: Iommu handles for secure/non secure modes
+ * @num_enc: Number of encoders
+ * @num_dma: Number of DMA blocks
+ * @dev_ver: Returned device capability array
+ */
+struct cam_jpeg_query_cap_cmd {
+	struct cam_iommu_handle dev_iommu_handle;
+	struct cam_iommu_handle cdm_iommu_handle;
+	uint32_t num_enc;
+	uint32_t num_dma;
+	struct cam_jpeg_dev_ver dev_ver[CAM_JPEG_DEV_TYPE_MAX];
+};
+
+/**
+ * struct cam_jpeg_res_info - JPEG output resource info
+ *
+ * @format: Format of the resource
+ * @width:  Width in pixels
+ * @height: Height in lines
+ * @fps:  Fps
+ */
+struct cam_jpeg_res_info {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	uint32_t fps;
+};
+
+/**
+ * struct cam_jpeg_acquire_dev_info - A JPEG device info
+ *
+ * @dev_type: Device type (ENC/DMA)
+ * @reserved: Reserved Bytes
+ * @in_res: In resource info
+ * @out_res: Out resource info
+ */
+struct cam_jpeg_acquire_dev_info {
+	uint32_t dev_type;
+	uint32_t reserved;
+	struct cam_jpeg_res_info in_res;
+	struct cam_jpeg_res_info out_res;
+};
+
+/**
+ * struct cam_jpeg_config_inout_param_info - JPEG Config time
+ *     input output params
+ *
+ * @clk_index: Input Param - clock selection index (-1 default)
+ * @output_size: Output Param - jpeg encode/dma output size in
+ *     bytes
+ */
+struct cam_jpeg_config_inout_param_info {
+	int32_t clk_index;
+	int32_t output_size;
+};
+
+#endif /* __UAPI_CAM_JPEG_H__ */
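For orientation only, a sketch of how a userspace client might fill the acquire-device payload defined above before packaging it into a CAM_ACQUIRE_DEV request; the include path assumes the installed UAPI headers, the format value is whatever token the camera stack defines, and mirroring the input resolution into out_res is an assumption of this sketch.

#include <stdint.h>
#include <string.h>
#include <media/cam_jpeg.h>

/* Illustrative only: populate an encoder acquire-info blob. */
static void fill_jpeg_enc_acquire(struct cam_jpeg_acquire_dev_info *info,
				  uint32_t format, uint32_t width,
				  uint32_t height, uint32_t fps)
{
	memset(info, 0, sizeof(*info));
	info->dev_type = CAM_JPEG_RES_TYPE_ENC;

	info->in_res.format = format;
	info->in_res.width = width;
	info->in_res.height = height;
	info->in_res.fps = fps;

	/* Assumption for this sketch: output mirrors the input resolution. */
	info->out_res = info->in_res;
}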
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6876d00..30fafc9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2196,6 +2196,15 @@
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
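+	/*
+	 * After a successful wakeup, poke the cpufreq governor for the
+	 * task's CPU if do_pl_notif() decides the change in predicted
+	 * load (PL) is large enough to be worth reporting.
+	 */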
+	if (success && sched_predl) {
+		raw_spin_lock_irqsave(&cpu_rq(cpu)->lock, flags);
+		if (do_pl_notif(cpu_rq(cpu)))
+			cpufreq_update_util(cpu_rq(cpu),
+					    SCHED_CPUFREQ_WALT |
+					    SCHED_CPUFREQ_PL);
+		raw_spin_unlock_irqrestore(&cpu_rq(cpu)->lock, flags);
+	}
+
 	return success;
 }
 
@@ -9585,3 +9594,5 @@
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_SCHED_WALT */
+
+__read_mostly bool sched_predl;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4b1e496..06681f3 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -38,9 +38,38 @@
 #include <trace/events/sched.h>
 
 #ifdef CONFIG_SCHED_WALT
+
 static inline bool task_fits_max(struct task_struct *p, int cpu);
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+					u32 new_task_load, u32 new_pred_demand);
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+				    int delta, bool inc);
+#endif /* CONFIG_SCHED_WALT */
+
+#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
+
+static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq);
+static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq,
+				  struct task_struct *p);
+static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq,
+				  struct task_struct *p);
+static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+					    struct cfs_rq *cfs_rq);
+static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+					    struct cfs_rq *cfs_rq);
+#else
+static inline void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) {}
+static inline void
+walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+static inline void
+walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+
+#define walt_inc_throttled_cfs_rq_stats(...)
+#define walt_dec_throttled_cfs_rq_stats(...)
+
 #endif
 
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -4007,13 +4036,16 @@
 		if (dequeue)
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 		qcfs_rq->h_nr_running -= task_delta;
+		walt_dec_throttled_cfs_rq_stats(&qcfs_rq->walt_stats, cfs_rq);
 
 		if (qcfs_rq->load.weight)
 			dequeue = 0;
 	}
 
-	if (!se)
+	if (!se) {
 		sub_nr_running(rq, task_delta);
+		walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq);
+	}
 
 	cfs_rq->throttled = 1;
 	cfs_rq->throttled_clock = rq_clock(rq);
@@ -4071,13 +4103,16 @@
 		if (enqueue)
 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
+		walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
 
-	if (!se)
+	if (!se) {
 		add_nr_running(rq, task_delta);
+		walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
+	}
 
 	/* determine whether we need to wake up potentially idle cpu */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -4419,6 +4454,7 @@
 {
 	cfs_rq->runtime_enabled = 0;
 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
+	walt_init_cfs_rq_stats(cfs_rq);
 }
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -4613,6 +4649,9 @@
 	int task_wakeup = flags & ENQUEUE_WAKEUP;
 #endif
 
+#ifdef CONFIG_SCHED_WALT
+	p->misfit = !task_fits_max(p, rq->cpu);
+#endif
 	/*
 	 * If in_iowait is set, the code below may not trigger any cpufreq
 	 * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -4636,6 +4675,7 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running++;
+		walt_inc_cfs_rq_stats(cfs_rq, p);
 
 		flags = ENQUEUE_WAKEUP;
 	}
@@ -4643,6 +4683,7 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running++;
+		walt_inc_cfs_rq_stats(cfs_rq, p);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -4653,9 +4694,6 @@
 
 	if (!se) {
 		add_nr_running(rq, 1);
-#ifdef CONFIG_SCHED_WALT
-		p->misfit = !task_fits_max(p, rq->cpu);
-#endif
 		inc_rq_walt_stats(rq, p);
 	}
 
@@ -4712,6 +4750,7 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running--;
+		walt_dec_cfs_rq_stats(cfs_rq, p);
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -4731,6 +4770,7 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running--;
+		walt_dec_cfs_rq_stats(cfs_rq, p);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -10348,7 +10388,7 @@
 	rq->misfit_task = misfit;
 
 	if (old_misfit != misfit) {
-		walt_adjust_nr_big_tasks(rq, 1, misfit);
+		walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
 		curr->misfit = misfit;
 	}
 #endif
@@ -10813,7 +10853,7 @@
 	.task_change_group	= task_change_group_fair,
 #endif
 #ifdef CONFIG_SCHED_WALT
-	.fixup_walt_sched_stats	= fixup_walt_sched_stats_common,
+	.fixup_walt_sched_stats	= walt_fixup_sched_stats_fair,
 #endif
 };
 
@@ -10865,6 +10905,161 @@
 /* WALT sched implementation begins here */
 #ifdef CONFIG_SCHED_WALT
 
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq)
+{
+	cfs_rq->walt_stats.nr_big_tasks = 0;
+	cfs_rq->walt_stats.cumulative_runnable_avg = 0;
+	cfs_rq->walt_stats.pred_demands_sum = 0;
+}
+
+static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
+{
+	inc_nr_big_task(&cfs_rq->walt_stats, p);
+	fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, p->ravg.demand,
+				      p->ravg.pred_demand);
+}
+
+static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
+{
+	dec_nr_big_task(&cfs_rq->walt_stats, p);
+	fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, -(s64)p->ravg.demand,
+				      -(s64)p->ravg.pred_demand);
+}
+
+static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+					    struct cfs_rq *tcfs_rq)
+{
+	struct rq *rq = rq_of(tcfs_rq);
+
+	stats->nr_big_tasks += tcfs_rq->walt_stats.nr_big_tasks;
+	fixup_cumulative_runnable_avg(stats,
+				tcfs_rq->walt_stats.cumulative_runnable_avg,
+				tcfs_rq->walt_stats.pred_demands_sum);
+
+	if (stats == &rq->walt_stats)
+		walt_fixup_cum_window_demand(rq,
+			tcfs_rq->walt_stats.cumulative_runnable_avg);
+
+}
+
+static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+					    struct cfs_rq *tcfs_rq)
+{
+	struct rq *rq = rq_of(tcfs_rq);
+
+	stats->nr_big_tasks -= tcfs_rq->walt_stats.nr_big_tasks;
+	fixup_cumulative_runnable_avg(stats,
+				-tcfs_rq->walt_stats.cumulative_runnable_avg,
+				-tcfs_rq->walt_stats.pred_demands_sum);
+
+	/*
+	 * We remove the throttled cfs_rq's tasks' contribution from the
+	 * cumulative window demand so that the same can be added
+	 * unconditionally when the cfs_rq is unthrottled.
+	 */
+	if (stats == &rq->walt_stats)
+		walt_fixup_cum_window_demand(rq,
+			-tcfs_rq->walt_stats.cumulative_runnable_avg);
+}
+
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
+					      task_load_delta,
+					      pred_demand_delta);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
+	if (!se) {
+		fixup_cumulative_runnable_avg(&rq->walt_stats,
+					      task_load_delta,
+					      pred_demand_delta);
+		walt_fixup_cum_window_demand(rq, task_load_delta);
+	}
+}
+
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+				    int delta, bool inc)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
+		BUG_ON(cfs_rq->walt_stats.nr_big_tasks < 0);
+
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
+	if (!se)
+		walt_adjust_nr_big_tasks(rq, delta, inc);
+}
+
+/*
+ * Check if task is part of a hierarchy where some cfs_rq does not have any
+ * runtime left.
+ *
+ * We can't rely on throttled_hierarchy() to do this test, as
+ * cfs_rq->throttle_count will not be updated yet when this function is called
+ * from scheduler_tick()
+ */
+static int task_will_be_throttled(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return 0;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		if (!cfs_rq->runtime_enabled)
+			continue;
+		if (cfs_rq->runtime_remaining <= 0)
+			return 1;
+	}
+
+	return 0;
+}
+
+#else /* CONFIG_CFS_BANDWIDTH */
+
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand)
+{
+	fixup_walt_sched_stats_common(rq, p, new_task_load, new_pred_demand);
+}
+
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+				    int delta, bool inc)
+{
+	walt_adjust_nr_big_tasks(rq, delta, inc);
+}
+
+static int task_will_be_throttled(struct task_struct *p)
+{
+	return false;
+}
+
+#endif /* CONFIG_CFS_BANDWIDTH */
+
 static inline int
 kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
 {
@@ -10897,6 +11092,9 @@
 		    rq->curr->nr_cpus_allowed == 1)
 			return;
 
+		if (task_will_be_throttled(p))
+			return;
+
 		raw_spin_lock(&migration_lock);
 		rcu_read_lock();
 		new_cpu = energy_aware_wake_cpu(p, cpu, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b4adb2a..4753977 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -25,6 +25,8 @@
 struct rq;
 struct cpuidle_state;
 
+extern __read_mostly bool sched_predl;
+
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int sched_ravg_window;
 
@@ -510,6 +512,11 @@
 	struct task_group *tg;	/* group that "owns" this runqueue */
 
 #ifdef CONFIG_CFS_BANDWIDTH
+
+#ifdef CONFIG_SCHED_WALT
+	struct walt_sched_stats walt_stats;
+#endif
+
 	int runtime_enabled;
 	u64 runtime_expires;
 	s64 runtime_remaining;
@@ -1797,13 +1804,18 @@
 		if (walt_load) {
 			u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
 				rq->grp_time.nt_prev_runnable_sum;
+			u64 pl = rq->walt_stats.pred_demands_sum;
 
 			nl = div64_u64(nl, sched_ravg_window >>
 						SCHED_CAPACITY_SHIFT);
+			pl = div64_u64(pl, sched_ravg_window >>
+						SCHED_CAPACITY_SHIFT);
 
 			walt_load->prev_window_util = util;
 			walt_load->nl = nl;
-			walt_load->pl = 0;
+			walt_load->pl = pl;
+			rq->old_busy_time = util;
+			rq->old_estimated_time = pl;
 			walt_load->ws = rq->window_start;
 		}
 	}
@@ -2225,6 +2237,9 @@
 	struct update_util_data *data;
 
 #ifdef CONFIG_SCHED_WALT
+	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
+						SCHED_CPUFREQ_PL;
+
 	/*
 	 * Skip if we've already reported, but not if this is an inter-cluster
 	 * migration. Also only allow WALT update sites.
@@ -2233,7 +2248,7 @@
 		return;
 	if (!sched_disable_window_stats &&
 		(rq->load_reported_window == rq->window_start) &&
-		!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+		!(flags & exception_flags))
 		return;
 	rq->load_reported_window = rq->window_start;
 #endif
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 985668b..48f3512 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -116,8 +116,6 @@
  * IMPORTANT: Initialize both copies to same value!!
  */
 
-static __read_mostly bool sched_predl;
-
 __read_mostly unsigned int sched_ravg_hist_size = 5;
 __read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
 
@@ -1279,6 +1277,33 @@
 	return delta;
 }
 
+/*
+ * Convert busy time to its frequency equivalent.
+ * Assumes the load is scaled to 1024.
+ */
+static inline unsigned int load_to_freq(struct rq *rq, u64 load)
+{
+	return mult_frac(cpu_max_possible_freq(cpu_of(rq)), load,
+			 capacity_orig_of(cpu_of(rq)));
+}
+
+bool do_pl_notif(struct rq *rq)
+{
+	u64 prev = rq->old_busy_time;
+	u64 pl = rq->walt_stats.pred_demands_sum;
+	int cpu = cpu_of(rq);
+
+	/* If already at max freq, bail out */
+	if (capacity_orig_of(cpu) == capacity_curr_of(cpu))
+		return false;
+
+	prev = max(prev, rq->old_estimated_time);
+
+	pl = div64_u64(pl, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+
+	/* 400 MHz filter. */
+	return (pl > prev) && (load_to_freq(rq, pl - prev) > 400000);
+}
+
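To make the filter concrete with purely illustrative numbers: loads are scaled to 1024, so on a CPU whose maximum possible frequency is 2 GHz with a capacity_orig of 1024, load_to_freq() maps one unit of load to roughly 2 MHz, and do_pl_notif() therefore reports only when the predicted demand exceeds the larger of the previously reported busy and estimated times by more than about 205 load units (the 400 MHz threshold).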
 static void rollover_cpu_window(struct rq *rq, bool full_window)
 {
 	u64 curr_sum = rq->curr_runnable_sum;
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index d669626..535f14b 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -162,6 +162,7 @@
 extern void set_window_start(struct rq *rq);
 void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
                                   u64 wallclock);
+extern bool do_pl_notif(struct rq *rq);
 
 #define SCHED_HIGH_IRQ_TIMEOUT 3
 static inline u64 sched_irqload(int cpu)
@@ -349,6 +350,7 @@
 }
 
 static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+static inline bool do_pl_notif(struct rq *rq) { return false; }
 
 static inline void
 inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 562fa69..997ac0b 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -259,7 +259,8 @@
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
 	struct trace_event_call *tp_event = p_event->tp_event;
-	hlist_del_rcu(&p_event->hlist_entry);
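+
+	/* Only unlink the entry if it was actually hashed. */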
+	if (!hlist_unhashed(&p_event->hlist_entry))
+		hlist_del_rcu(&p_event->hlist_entry);
 	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
 }
 
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 2d1a6af..cf7063a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -220,16 +220,7 @@
 	enum cfg80211_event_type type;
 
 	union {
-		struct {
-			u8 bssid[ETH_ALEN];
-			const u8 *req_ie;
-			const u8 *resp_ie;
-			size_t req_ie_len;
-			size_t resp_ie_len;
-			struct cfg80211_bss *bss;
-			int status; /* -1 = failed; 0..65535 = status code */
-			enum nl80211_timeout_reason timeout_reason;
-		} cr;
+		struct cfg80211_connect_resp_params cr;
 		struct {
 			const u8 *req_ie;
 			const u8 *resp_ie;
@@ -385,12 +376,9 @@
 		     struct cfg80211_connect_params *connect,
 		     struct cfg80211_cached_keys *connkeys,
 		     const u8 *prev_bssid);
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			       const u8 *req_ie, size_t req_ie_len,
-			       const u8 *resp_ie, size_t resp_ie_len,
-			       int status, bool wextev,
-			       struct cfg80211_bss *bss,
-			       enum nl80211_timeout_reason timeout_reason);
+void __cfg80211_connect_result(struct net_device *dev,
+			       struct cfg80211_connect_resp_params *params,
+			       bool wextev);
 void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 			     size_t ie_len, u16 reason, bool from_ap);
 int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f860bea..5499e9f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -26,9 +26,16 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
-	u8 *ie = mgmt->u.assoc_resp.variable;
-	int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
-	u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+	struct cfg80211_connect_resp_params cr;
+
+	memset(&cr, 0, sizeof(cr));
+	cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code);
+	cr.bssid = mgmt->bssid;
+	cr.bss = bss;
+	cr.resp_ie = mgmt->u.assoc_resp.variable;
+	cr.resp_ie_len =
+		len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
+	cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
 
 	trace_cfg80211_send_rx_assoc(dev, bss);
 
@@ -38,7 +45,7 @@
 	 * and got a reject -- we only try again with an assoc
 	 * frame instead of reassoc.
 	 */
-	if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) {
+	if (cfg80211_sme_rx_assoc_resp(wdev, cr.status)) {
 		cfg80211_unhold_bss(bss_from_pub(bss));
 		cfg80211_put_bss(wiphy, bss);
 		return;
@@ -46,10 +53,7 @@
 
 	nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues);
 	/* update current_bss etc., consumes the bss reference */
-	__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
-				  status_code,
-				  status_code == WLAN_STATUS_SUCCESS, bss,
-				  NL80211_TIMEOUT_UNSPECIFIED);
+	__cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS);
 }
 EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index efc95a5..01324d2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -424,6 +424,15 @@
 		.len = sizeof(struct nl80211_bss_select_rssi_adjust)
 	},
 	[NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
+	[NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY,
+					     .len = FILS_ERP_MAX_USERNAME_LEN },
+	[NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY,
+					  .len = FILS_ERP_MAX_REALM_LEN },
+	[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 },
+	[NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY,
+					.len = FILS_ERP_MAX_RRK_LEN },
+	[NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 },
+	[NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
 };
 
 /* policy for the key attributes */
@@ -3766,6 +3775,19 @@
 			return false;
 		return true;
 	case NL80211_CMD_CONNECT:
+		/* SAE not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_SAE)
+			return false;
+		/* FILS with SK PFS or PK not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+		    auth_type == NL80211_AUTHTYPE_FILS_PK)
+			return false;
+		if (!wiphy_ext_feature_isset(
+			    &rdev->wiphy,
+			    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+		    auth_type == NL80211_AUTHTYPE_FILS_SK)
+			return false;
+		return true;
 	case NL80211_CMD_START_AP:
 		/* SAE not supported yet */
 		if (auth_type == NL80211_AUTHTYPE_SAE)
@@ -8801,6 +8823,35 @@
 		}
 	}
 
+	if (wiphy_ext_feature_isset(&rdev->wiphy,
+				    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+		connect.fils_erp_username =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+		connect.fils_erp_username_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+		connect.fils_erp_realm =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+		connect.fils_erp_realm_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+		connect.fils_erp_next_seq_num =
+			nla_get_u16(
+			   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
+		connect.fils_erp_rrk =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+		connect.fils_erp_rrk_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+	} else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+		kzfree(connkeys);
+		return -EINVAL;
+	}
+
 	wdev_lock(dev->ieee80211_ptr);
 	err = cfg80211_connect(rdev, dev, &connect, connkeys,
 			       connect.prev_bssid);
@@ -8907,14 +8958,28 @@
 
 	memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
 
-	if (!info->attrs[NL80211_ATTR_MAC])
-		return -EINVAL;
-
 	if (!info->attrs[NL80211_ATTR_PMKID])
 		return -EINVAL;
 
 	pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
-	pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+	if (info->attrs[NL80211_ATTR_MAC]) {
+		pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+	} else if (info->attrs[NL80211_ATTR_SSID] &&
+		   info->attrs[NL80211_ATTR_FILS_CACHE_ID] &&
+		   (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA ||
+		    info->attrs[NL80211_ATTR_PMK])) {
+		pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+		pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+		pmksa.cache_id =
+			nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]);
+	} else {
+		return -EINVAL;
+	}
+	if (info->attrs[NL80211_ATTR_PMK]) {
+		pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
+		pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
+	}
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
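A companion userspace sketch (again libnl-3, illustrative only, error handling omitted) of the FILS-style NL80211_CMD_SET_PMKSA request handled above, keyed by SSID and cache identifier instead of BSSID:

#include <errno.h>
#include <stddef.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

/* Illustrative only: FILS PMKSA cache entry keyed by SSID + cache id. */
static int fils_set_pmksa(struct nl_sock *sk, int nl80211_id, int ifindex,
			  const void *ssid, size_t ssid_len,
			  const unsigned char cache_id[2],
			  const void *pmkid, size_t pmkid_len,
			  const void *pmk, size_t pmk_len)
{
	struct nl_msg *msg = nlmsg_alloc();
	int ret;

	if (!msg)
		return -ENOMEM;

	genlmsg_put(msg, 0, 0, nl80211_id, 0, 0, NL80211_CMD_SET_PMKSA, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_SSID, ssid_len, ssid);
	nla_put(msg, NL80211_ATTR_FILS_CACHE_ID, 2, cache_id);
	nla_put(msg, NL80211_ATTR_PMKID, pmkid_len, pmkid);
	nla_put(msg, NL80211_ATTR_PMK, pmk_len, pmk);

	ret = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	return ret;
}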
@@ -13238,17 +13303,16 @@
 }
 
 void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
-				 struct net_device *netdev, const u8 *bssid,
-				 const u8 *req_ie, size_t req_ie_len,
-				 const u8 *resp_ie, size_t resp_ie_len,
-				 int status,
-				 enum nl80211_timeout_reason timeout_reason,
+				 struct net_device *netdev,
+				 struct cfg80211_connect_resp_params *cr,
 				 gfp_t gfp)
 {
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
+	msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len +
+			cr->fils_kek_len + cr->pmk_len +
+			(cr->pmkid ? WLAN_PMKID_LEN : 0), gfp);
 	if (!msg)
 		return;
 
@@ -13260,17 +13324,31 @@
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-	    (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
+	    (cr->bssid &&
+	     nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) ||
 	    nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
-			status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
-			status) ||
-	    (status < 0 &&
+			cr->status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
+			cr->status) ||
+	    (cr->status < 0 &&
 	     (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
-	      nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, timeout_reason))) ||
-	    (req_ie &&
-	     nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
-	    (resp_ie &&
-	     nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+	      nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON,
+			  cr->timeout_reason))) ||
+	    (cr->req_ie &&
+	     nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) ||
+	    (cr->resp_ie &&
+	     nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len,
+		     cr->resp_ie)) ||
+	    (cr->update_erp_next_seq_num &&
+	     nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+			 cr->fils_erp_next_seq_num)) ||
+	    (cr->status == WLAN_STATUS_SUCCESS &&
+	     ((cr->fils_kek &&
+	       nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils_kek_len,
+		       cr->fils_kek)) ||
+	      (cr->pmk &&
+	       nla_put(msg, NL80211_ATTR_PMK, cr->pmk_len, cr->pmk)) ||
+	      (cr->pmkid &&
+	       nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->pmkid)))))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index f551438..2a84d18 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -55,11 +55,8 @@
 				struct net_device *netdev,
 				const u8 *addr, gfp_t gfp);
 void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
-				 struct net_device *netdev, const u8 *bssid,
-				 const u8 *req_ie, size_t req_ie_len,
-				 const u8 *resp_ie, size_t resp_ie_len,
-				 int status,
-				 enum nl80211_timeout_reason timeout_reason,
+				 struct net_device *netdev,
+				 struct cfg80211_connect_resp_params *params,
 				 gfp_t gfp);
 void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
 			 struct net_device *netdev, const u8 *bssid,
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a186dd8..d7e6abc 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -276,10 +276,13 @@
 		}
 		treason = NL80211_TIMEOUT_UNSPECIFIED;
 		if (cfg80211_conn_do_work(wdev, &treason)) {
-			__cfg80211_connect_result(
-					wdev->netdev, bssid,
-					NULL, 0, NULL, 0, -1, false, NULL,
-					treason);
+			struct cfg80211_connect_resp_params cr;
+
+			memset(&cr, 0, sizeof(cr));
+			cr.status = -1;
+			cr.bssid = bssid;
+			cr.timeout_reason = treason;
+			__cfg80211_connect_result(wdev->netdev, &cr, false);
 		}
 		wdev_unlock(wdev);
 	}
@@ -382,10 +385,13 @@
 		wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
 		schedule_work(&rdev->conn_work);
 	} else if (status_code != WLAN_STATUS_SUCCESS) {
-		__cfg80211_connect_result(wdev->netdev, mgmt->bssid,
-					  NULL, 0, NULL, 0,
-					  status_code, false, NULL,
-					  NL80211_TIMEOUT_UNSPECIFIED);
+		struct cfg80211_connect_resp_params cr;
+
+		memset(&cr, 0, sizeof(cr));
+		cr.status = status_code;
+		cr.bssid = mgmt->bssid;
+		cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
+		__cfg80211_connect_result(wdev->netdev, &cr, false);
 	} else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
 		wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
 		schedule_work(&rdev->conn_work);
@@ -693,12 +699,9 @@
  */
 
 /* This method must consume bss one way or another */
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			       const u8 *req_ie, size_t req_ie_len,
-			       const u8 *resp_ie, size_t resp_ie_len,
-			       int status, bool wextev,
-			       struct cfg80211_bss *bss,
-			       enum nl80211_timeout_reason timeout_reason)
+void __cfg80211_connect_result(struct net_device *dev,
+			       struct cfg80211_connect_resp_params *cr,
+			       bool wextev)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	const u8 *country_ie;
@@ -710,48 +713,48 @@
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) {
-		cfg80211_put_bss(wdev->wiphy, bss);
+		cfg80211_put_bss(wdev->wiphy, cr->bss);
 		return;
 	}
 
-	nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
-				    bssid, req_ie, req_ie_len,
-				    resp_ie, resp_ie_len,
-				    status, timeout_reason, GFP_KERNEL);
+	nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr,
+				    GFP_KERNEL);
 
 #ifdef CONFIG_CFG80211_WEXT
 	if (wextev) {
-		if (req_ie && status == WLAN_STATUS_SUCCESS) {
+		if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) {
 			memset(&wrqu, 0, sizeof(wrqu));
-			wrqu.data.length = req_ie_len;
-			wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie);
+			wrqu.data.length = cr->req_ie_len;
+			wireless_send_event(dev, IWEVASSOCREQIE, &wrqu,
+					    cr->req_ie);
 		}
 
-		if (resp_ie && status == WLAN_STATUS_SUCCESS) {
+		if (cr->resp_ie && cr->status == WLAN_STATUS_SUCCESS) {
 			memset(&wrqu, 0, sizeof(wrqu));
-			wrqu.data.length = resp_ie_len;
-			wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie);
+			wrqu.data.length = cr->resp_ie_len;
+			wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu,
+					    cr->resp_ie);
 		}
 
 		memset(&wrqu, 0, sizeof(wrqu));
 		wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-		if (bssid && status == WLAN_STATUS_SUCCESS) {
-			memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
-			memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+		if (cr->bssid && cr->status == WLAN_STATUS_SUCCESS) {
+			memcpy(wrqu.ap_addr.sa_data, cr->bssid, ETH_ALEN);
+			memcpy(wdev->wext.prev_bssid, cr->bssid, ETH_ALEN);
 			wdev->wext.prev_bssid_valid = true;
 		}
 		wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
 	}
 #endif
 
-	if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+	if (!cr->bss && (cr->status == WLAN_STATUS_SUCCESS)) {
 		WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
-		bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
-				       wdev->ssid, wdev->ssid_len,
-				       wdev->conn_bss_type,
-				       IEEE80211_PRIVACY_ANY);
-		if (bss)
-			cfg80211_hold_bss(bss_from_pub(bss));
+		cr->bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->bssid,
+					   wdev->ssid, wdev->ssid_len,
+					   wdev->conn_bss_type,
+					   IEEE80211_PRIVACY_ANY);
+		if (cr->bss)
+			cfg80211_hold_bss(bss_from_pub(cr->bss));
 	}
 
 	if (wdev->current_bss) {
@@ -760,28 +763,28 @@
 		wdev->current_bss = NULL;
 	}
 
-	if (status != WLAN_STATUS_SUCCESS) {
+	if (cr->status != WLAN_STATUS_SUCCESS) {
 		kzfree(wdev->connect_keys);
 		wdev->connect_keys = NULL;
 		wdev->ssid_len = 0;
-		if (bss) {
-			cfg80211_unhold_bss(bss_from_pub(bss));
-			cfg80211_put_bss(wdev->wiphy, bss);
+		if (cr->bss) {
+			cfg80211_unhold_bss(bss_from_pub(cr->bss));
+			cfg80211_put_bss(wdev->wiphy, cr->bss);
 		}
 		cfg80211_sme_free(wdev);
 		return;
 	}
 
-	if (WARN_ON(!bss))
+	if (WARN_ON(!cr->bss))
 		return;
 
-	wdev->current_bss = bss_from_pub(bss);
+	wdev->current_bss = bss_from_pub(cr->bss);
 
 	if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
 		cfg80211_upload_connect_keys(wdev);
 
 	rcu_read_lock();
-	country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+	country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY);
 	if (!country_ie) {
 		rcu_read_unlock();
 		return;
@@ -798,64 +801,95 @@
 	 * - country_ie + 2, the start of the country ie data, and
 	 * - and country_ie[1] which is the IE length
 	 */
-	regulatory_hint_country_ie(wdev->wiphy, bss->channel->band,
+	regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band,
 				   country_ie + 2, country_ie[1]);
 	kfree(country_ie);
 }
 
 /* Consumes bss object one way or another */
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
-			  struct cfg80211_bss *bss, const u8 *req_ie,
-			  size_t req_ie_len, const u8 *resp_ie,
-			  size_t resp_ie_len, int status, gfp_t gfp,
-			  enum nl80211_timeout_reason timeout_reason)
+void cfg80211_connect_done(struct net_device *dev,
+			   struct cfg80211_connect_resp_params *params,
+			   gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
+	u8 *next;
 
-	if (bss) {
+	if (params->bss) {
 		/* Make sure the bss entry provided by the driver is valid. */
-		struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+		struct cfg80211_internal_bss *ibss = bss_from_pub(params->bss);
 
 		if (WARN_ON(list_empty(&ibss->list))) {
-			cfg80211_put_bss(wdev->wiphy, bss);
+			cfg80211_put_bss(wdev->wiphy, params->bss);
 			return;
 		}
 	}
 
-	ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
+	ev = kzalloc(sizeof(*ev) + (params->bssid ? ETH_ALEN : 0) +
+		     params->req_ie_len + params->resp_ie_len +
+		     params->fils_kek_len + params->pmk_len +
+		     (params->pmkid ? WLAN_PMKID_LEN : 0), gfp);
 	if (!ev) {
-		cfg80211_put_bss(wdev->wiphy, bss);
+		cfg80211_put_bss(wdev->wiphy, params->bss);
 		return;
 	}
 
 	ev->type = EVENT_CONNECT_RESULT;
-	if (bssid)
-		memcpy(ev->cr.bssid, bssid, ETH_ALEN);
-	if (req_ie_len) {
-		ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
-		ev->cr.req_ie_len = req_ie_len;
-		memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
+	next = ((u8 *)ev) + sizeof(*ev);
+	if (params->bssid) {
+		ev->cr.bssid = next;
+		memcpy((void *)ev->cr.bssid, params->bssid, ETH_ALEN);
+		next += ETH_ALEN;
 	}
-	if (resp_ie_len) {
-		ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
-		ev->cr.resp_ie_len = resp_ie_len;
-		memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+	if (params->req_ie_len) {
+		ev->cr.req_ie = next;
+		ev->cr.req_ie_len = params->req_ie_len;
+		memcpy((void *)ev->cr.req_ie, params->req_ie,
+		       params->req_ie_len);
+		next += params->req_ie_len;
 	}
-	if (bss)
-		cfg80211_hold_bss(bss_from_pub(bss));
-	ev->cr.bss = bss;
-	ev->cr.status = status;
-	ev->cr.timeout_reason = timeout_reason;
+	if (params->resp_ie_len) {
+		ev->cr.resp_ie = next;
+		ev->cr.resp_ie_len = params->resp_ie_len;
+		memcpy((void *)ev->cr.resp_ie, params->resp_ie,
+		       params->resp_ie_len);
+		next += params->resp_ie_len;
+	}
+	if (params->fils_kek_len) {
+		ev->cr.fils_kek = next;
+		ev->cr.fils_kek_len = params->fils_kek_len;
+		memcpy((void *)ev->cr.fils_kek, params->fils_kek,
+		       params->fils_kek_len);
+		next += params->fils_kek_len;
+	}
+	if (params->pmk_len) {
+		ev->cr.pmk = next;
+		ev->cr.pmk_len = params->pmk_len;
+		memcpy((void *)ev->cr.pmk, params->pmk, params->pmk_len);
+		next += params->pmk_len;
+	}
+	if (params->pmkid) {
+		ev->cr.pmkid = next;
+		memcpy((void *)ev->cr.pmkid, params->pmkid, WLAN_PMKID_LEN);
+		next += WLAN_PMKID_LEN;
+	}
+	ev->cr.update_erp_next_seq_num = params->update_erp_next_seq_num;
+	if (params->update_erp_next_seq_num)
+		ev->cr.fils_erp_next_seq_num = params->fils_erp_next_seq_num;
+	if (params->bss)
+		cfg80211_hold_bss(bss_from_pub(params->bss));
+	ev->cr.bss = params->bss;
+	ev->cr.status = params->status;
+	ev->cr.timeout_reason = params->timeout_reason;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
 	queue_work(cfg80211_wq, &rdev->event_work);
 }
-EXPORT_SYMBOL(cfg80211_connect_bss);
+EXPORT_SYMBOL(cfg80211_connect_done);
 
 /* Consumes bss object one way or another */
 void __cfg80211_roamed(struct wireless_dev *wdev,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8a313c6..8ac413f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -936,7 +936,6 @@
 {
 	struct cfg80211_event *ev;
 	unsigned long flags;
-	const u8 *bssid = NULL;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	while (!list_empty(&wdev->event_list)) {
@@ -948,15 +947,10 @@
 		wdev_lock(wdev);
 		switch (ev->type) {
 		case EVENT_CONNECT_RESULT:
-			if (!is_zero_ether_addr(ev->cr.bssid))
-				bssid = ev->cr.bssid;
 			__cfg80211_connect_result(
-				wdev->netdev, bssid,
-				ev->cr.req_ie, ev->cr.req_ie_len,
-				ev->cr.resp_ie, ev->cr.resp_ie_len,
-				ev->cr.status,
-				ev->cr.status == WLAN_STATUS_SUCCESS,
-				ev->cr.bss, ev->cr.timeout_reason);
+				wdev->netdev,
+				&ev->cr,
+				ev->cr.status == WLAN_STATUS_SUCCESS);
 			break;
 		case EVENT_ROAMED:
 			__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c
index 22286a1..14fb793 100644
--- a/scripts/dtc/libfdt/fdt.c
+++ b/scripts/dtc/libfdt/fdt.c
@@ -71,6 +71,20 @@
 		return -FDT_ERR_BADMAGIC;
 	}
 
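+	/*
+	 * Reject headers whose struct/strings block offset plus size would
+	 * wrap a 32-bit value or extend past totalsize, so later accessors
+	 * cannot be pushed out of bounds by a malformed blob.
+	 */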
+	if (fdt_off_dt_struct(fdt) > (UINT_MAX - fdt_size_dt_struct(fdt)))
+		return -FDT_ERR_BADOFFSET;
+
+	if (fdt_off_dt_strings(fdt) > (UINT_MAX - fdt_size_dt_strings(fdt)))
+		return -FDT_ERR_BADOFFSET;
+
+	if ((fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt))
+	    > fdt_totalsize(fdt))
+		return -FDT_ERR_BADOFFSET;
+
+	if ((fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))
+	    > fdt_totalsize(fdt))
+		return -FDT_ERR_BADOFFSET;
+
 	return 0;
 }
 
diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
index 8be02b1..468e169 100644
--- a/scripts/dtc/libfdt/fdt_rw.c
+++ b/scripts/dtc/libfdt/fdt_rw.c
@@ -396,7 +396,7 @@
 static void _fdt_packblocks(const char *old, char *new,
 			    int mem_rsv_size, int struct_size)
 {
-	int mem_rsv_off, struct_off, strings_off;
+	uint32_t mem_rsv_off, struct_off, strings_off;
 
 	mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8);
 	struct_off = mem_rsv_off + mem_rsv_size;
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index a472bf2..01c67be 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1159,7 +1159,7 @@
 
 config SND_SOC_MSM_HDMI_CODEC_RX
 	bool "HDMI Audio Playback"
-	depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
+	depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_660)
 	help
 	HDMI audio drivers should be built only if the platform
         supports hdmi panel.
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index caf8843..312bb45 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -22,7 +22,7 @@
 
 # for SDM660 sound card driver
 snd-soc-sdm660-common-objs := sdm660-common.o
-obj-$(CONFIG_SND_SOC_SDM660_COMMON) += snd-soc-sdm660-common.o
+obj-$(CONFIG_SND_SOC_660) += snd-soc-sdm660-common.o
 
 # for SDM660 sound card driver
 snd-soc-int-codec-objs := sdm660-internal.o
diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c
index b34b04b..43df772 100644
--- a/sound/soc/msm/sdm660-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -44,6 +44,8 @@
 	EXT_DISP_RX_IDX_MAX,
 };
 
+bool codec_reg_done;
+
 /* TDM default config */
 static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
 	{ /* PRI TDM */
@@ -2016,6 +2018,12 @@
 }
 EXPORT_SYMBOL(msm_common_snd_controls_size);
 
+void msm_set_codec_reg_done(bool done)
+{
+	codec_reg_done = done;
+}
+EXPORT_SYMBOL(msm_set_codec_reg_done);
+
 static inline int param_is_mask(int p)
 {
 	return (p >= SNDRV_PCM_HW_PARAM_FIRST_MASK) &&
@@ -3027,6 +3035,12 @@
 	  .data = "tasha_codec"},
 	{ .compatible = "qcom,sdm660-asoc-snd-tavil",
 	  .data = "tavil_codec"},
+	{ .compatible = "qcom,sdm670-asoc-snd",
+	  .data = "internal_codec"},
+	{ .compatible = "qcom,sdm670-asoc-snd-tasha",
+	  .data = "tasha_codec"},
+	{ .compatible = "qcom,sdm670-asoc-snd-tavil",
+	  .data = "tavil_codec"},
 	{},
 };
 
@@ -3044,6 +3058,7 @@
 	if (!pdata)
 		return -ENOMEM;
 
+	msm_set_codec_reg_done(false);
 	match = of_match_node(sdm660_asoc_machine_of_match,
 			      pdev->dev.of_node);
 	if (!match)
diff --git a/sound/soc/msm/sdm660-common.h b/sound/soc/msm/sdm660-common.h
index bca8cd7..ffe77bc 100644
--- a/sound/soc/msm/sdm660-common.h
+++ b/sound/soc/msm/sdm660-common.h
@@ -122,4 +122,5 @@
 int msm_mi2s_snd_startup(struct snd_pcm_substream *substream);
 void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream);
 int msm_common_snd_controls_size(void);
+void msm_set_codec_reg_done(bool done);
 #endif
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index 6ff29c9..34a6626 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -29,8 +29,15 @@
 #define WCN_CDC_SLIM_RX_CH_MAX 2
 #define WCN_CDC_SLIM_TX_CH_MAX 3
 
-static struct snd_soc_card snd_soc_card_msm_card_tavil;
-static struct snd_soc_card snd_soc_card_msm_card_tasha;
+static struct snd_soc_card snd_soc_card_msm_card_tavil = {
+	.name = "sdm670-tavil-snd-card",
+	.late_probe = msm_snd_card_tavil_late_probe,
+};
+
+static struct snd_soc_card snd_soc_card_msm_card_tasha = {
+	.name = "sdm670-tasha-snd-card",
+	.late_probe = msm_snd_card_tasha_late_probe,
+};
 
 static struct snd_soc_ops msm_ext_slimbus_be_ops = {
 	.hw_params = msm_snd_hw_params,
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index 426c150..4224289 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -55,7 +55,6 @@
 
 static int msm_ext_spk_control = 1;
 static struct wcd_mbhc_config *wcd_mbhc_cfg_ptr;
-bool codec_reg_done;
 
 struct msm_asoc_wcd93xx_codec {
 	void* (*get_afe_config_fn)(struct snd_soc_codec *codec,
@@ -603,23 +602,23 @@
 
 static void *def_ext_mbhc_cal(void)
 {
-	void *tavil_wcd_cal;
+	void *wcd_mbhc_cal;
 	struct wcd_mbhc_btn_detect_cfg *btn_cfg;
 	u16 *btn_high;
 
-	tavil_wcd_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
+	wcd_mbhc_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
 				WCD9XXX_MBHC_DEF_RLOADS), GFP_KERNEL);
-	if (!tavil_wcd_cal)
+	if (!wcd_mbhc_cal)
 		return NULL;
 
-#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(tavil_wcd_cal)->X) = (Y))
+#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(wcd_mbhc_cal)->X) = (Y))
 	S(v_hs_max, 1600);
 #undef S
-#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(tavil_wcd_cal)->X) = (Y))
+#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(wcd_mbhc_cal)->X) = (Y))
 	S(num_btn, WCD_MBHC_DEF_BUTTONS);
 #undef S
 
-	btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(tavil_wcd_cal);
+	btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(wcd_mbhc_cal);
 	btn_high = ((void *)&btn_cfg->_v_btn_low) +
 		(sizeof(btn_cfg->_v_btn_low[0]) * btn_cfg->num_btn);
 
@@ -632,7 +631,7 @@
 	btn_high[6] = 500;
 	btn_high[7] = 500;
 
-	return tavil_wcd_cal;
+	return wcd_mbhc_cal;
 }
 
 static inline int param_is_mask(int p)
@@ -1478,6 +1477,79 @@
 	{"MIC BIAS4", NULL, "MCLK"},
 };
 
+int msm_snd_card_tasha_late_probe(struct snd_soc_card *card)
+{
+	const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+	void *mbhc_calibration;
+
+	rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+	if (!rtd) {
+		dev_err(card->dev,
+			"%s: snd_soc_get_pcm_runtime for %s failed!\n",
+			__func__, be_dl_name);
+		ret = -EINVAL;
+		goto err_pcm_runtime;
+	}
+
+	mbhc_calibration = def_ext_mbhc_cal();
+	if (!mbhc_calibration) {
+		ret = -ENOMEM;
+		goto err_mbhc_cal;
+	}
+	wcd_mbhc_cfg_ptr->calibration = mbhc_calibration;
+	ret = tasha_mbhc_hs_detect(rtd->codec, wcd_mbhc_cfg_ptr);
+	if (ret) {
+		dev_err(card->dev, "%s: mbhc hs detect failed, err:%d\n",
+			__func__, ret);
+		goto err_hs_detect;
+	}
+	return 0;
+
+err_hs_detect:
+	kfree(mbhc_calibration);
+err_mbhc_cal:
+err_pcm_runtime:
+	return ret;
+}
+
+int msm_snd_card_tavil_late_probe(struct snd_soc_card *card)
+{
+	const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+	void *mbhc_calibration;
+
+	rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+	if (!rtd) {
+		dev_err(card->dev,
+			"%s: snd_soc_get_pcm_runtime for %s failed!\n",
+			__func__, be_dl_name);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mbhc_calibration = def_ext_mbhc_cal();
+	if (!mbhc_calibration) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	wcd_mbhc_cfg_ptr->calibration = mbhc_calibration;
+	ret = tavil_mbhc_hs_detect(rtd->codec, wcd_mbhc_cfg_ptr);
+	if (ret) {
+		dev_err(card->dev, "%s: mbhc hs detect failed, err:%d\n",
+			__func__, ret);
+		goto err_free_mbhc_cal;
+	}
+	return 0;
+
+err_free_mbhc_cal:
+	kfree(mbhc_calibration);
+err:
+	return ret;
+}
+
 /**
  * msm_audrx_init - Audio init function of sound card instantiate.
  *
@@ -1698,7 +1770,6 @@
 		if (!entry) {
 			pr_debug("%s: Cannot create codecs module entry\n",
 				 __func__);
-			pdata->codec_root = NULL;
 			goto done;
 		}
 		pdata->codec_root = entry;
@@ -1721,50 +1792,17 @@
 		if (!entry) {
 			pr_debug("%s: Cannot create codecs module entry\n",
 				 __func__);
-			ret = 0;
-			goto err_snd_module;
+			goto done;
 		}
 		pdata->codec_root = entry;
 		tasha_codec_info_create_codec_entry(pdata->codec_root, codec);
 		tasha_mbhc_zdet_gpio_ctrl(msm_config_hph_en0_gpio, rtd->codec);
 	}
-
-	wcd_mbhc_cfg_ptr->calibration = def_ext_mbhc_cal();
-	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
-		if (wcd_mbhc_cfg_ptr->calibration) {
-			pdata->codec = codec;
-			ret = tavil_mbhc_hs_detect(codec, wcd_mbhc_cfg_ptr);
-			if (ret < 0)
-				pr_err("%s: Failed to intialise mbhc %d\n",
-						__func__, ret);
-		} else {
-			pr_err("%s: wcd_mbhc_cfg calibration is NULL\n",
-					__func__);
-			ret = -ENOMEM;
-			goto err_mbhc_cal;
-		}
-	} else {
-		if (wcd_mbhc_cfg_ptr->calibration) {
-			pdata->codec = codec;
-			ret = tasha_mbhc_hs_detect(codec, wcd_mbhc_cfg_ptr);
-			if (ret < 0)
-				pr_err("%s: Failed to intialise mbhc %d\n",
-						__func__, ret);
-		} else {
-			pr_err("%s: wcd_mbhc_cfg calibration is NULL\n",
-					__func__);
-			ret = -ENOMEM;
-			goto err_mbhc_cal;
-		}
-
-	}
-	codec_reg_done = true;
 done:
+	msm_set_codec_reg_done(true);
 	return 0;
 
-err_snd_module:
 err_afe_cfg:
-err_mbhc_cal:
 	return ret;
 }
 EXPORT_SYMBOL(msm_audrx_init);
diff --git a/sound/soc/msm/sdm660-external.h b/sound/soc/msm/sdm660-external.h
index acf5735..d53e7c7 100644
--- a/sound/soc/msm/sdm660-external.h
+++ b/sound/soc/msm/sdm660-external.h
@@ -30,6 +30,8 @@
 						int snd_card_val);
 int msm_ext_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
 			       struct snd_pcm_hw_params *params);
+int msm_snd_card_tavil_late_probe(struct snd_soc_card *card);
+int msm_snd_card_tasha_late_probe(struct snd_soc_card *card);
 #ifdef CONFIG_SND_SOC_EXT_CODEC
 int msm_ext_cdc_init(struct platform_device *, struct msm_asoc_mach_data *,
 		     struct snd_soc_card **, struct wcd_mbhc_config *);
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 4b9334b..1402154 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -1314,6 +1314,7 @@
 	msm_dig_codec_info_create_codec_entry(codec_root, dig_cdc);
 	msm_anlg_codec_info_create_codec_entry(codec_root, ana_cdc);
 done:
+	msm_set_codec_reg_done(true);
 	return 0;
 }
 
diff --git a/sound/usb/card.c b/sound/usb/card.c
index eaf18aa..a87a526 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -619,7 +619,7 @@
 	assoc = intf->intf_assoc;
 	if (assoc && assoc->bFunctionClass == USB_CLASS_AUDIO &&
 	    assoc->bFunctionProtocol == UAC_VERSION_3 &&
-	    assoc->bFunctionSubClass == FULL_ADC_PROFILE) {
+	    assoc->bFunctionSubClass == FULL_ADC_3_0) {
 		dev_info(&dev->dev, "No support for full-fledged ADC 3.0 yet!!\n");
 		return -EINVAL;
 	}