Merge "msm: vidc: Fix use-after-free memory failure"
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
index c8077cb..051b315 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -20,6 +20,8 @@
   - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
   - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
   - qcom,bsm-ee : optional, indicate the BAM EE value, changes from target to target. Default value is 1 if not specified.
+  - qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+  - iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
 
 Example:
 
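+An illustrative, abbreviated fragment showing how these two optional
+properties might be added to a qcedev node is given below; the node name, the
+&apps_smmu phandle and the SID/mask values are placeholders and must be taken
+from the target's SMMU configuration. The same two properties apply to the
+qcom,qcrypto binding that follows.
+
+	qcom,qcedev@0 {
+		...
+		qcom,smmu-s1-bypass;
+		iommus = <&apps_smmu 0x506 0x0011>;
+	};
+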
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index 3c8a79a..fa27198 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -40,6 +40,9 @@
 	required. For other targets such as fsm, they do not perform
 	bus scaling. It is not required for those targets.
 
+  - qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+  - iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
+
 Example:
 
         qcom,qcrypto@fd444000 {
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index cbe8378..32c31af 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -401,6 +401,24 @@
 					String that specifies the ctrl state for reading the panel status.
 					"dsi_lp_mode" = DSI low power mode
 					"dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-lp1-command:		An optional byte stream to request low
+					power mode on a panel
+- qcom,mdss-dsi-lp1-command-mode:	String that specifies the ctrl state for
+					setting the panel power mode.
+					"dsi_lp_mode" = DSI low power mode
+					"dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-lp2-command:		An optional byte stream to request ultra
+					low power mode on a panel
+- qcom,mdss-dsi-lp2-command-mode:	String that specifies the ctrl state for
+					setting the panel power mode.
+					"dsi_lp_mode" = DSI low power mode
+					"dsi_hs_mode" = DSI high speed mode
+- qcom,mdss-dsi-nolp-command:		An optional byte stream to disable low
+					power and ultra low power panel modes
+- qcom,mdss-dsi-nolp-command-mode:	String that specifies the ctrl state for
+					setting the panel power mode.
+					"dsi_lp_mode" = DSI low power mode
+					"dsi_hs_mode" = DSI high speed mode
 - qcom,mdss-dsi-panel-status-check-mode:Specifies the panel status check method for ESD recovery.
 					"bta_check" = Uses BTA to check the panel status
 					"reg_read" = Reads panel status register to check the panel status
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
new file mode 100644
index 0000000..933ad85
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
@@ -0,0 +1,438 @@
+* Qualcomm Technologies, Inc. MSM EEPROM
+
+EEPROM is a one-time programmable (OTP) device that stores the calibration data
+used for the camera sensor. It may be integrated either in the sensor module or
+in the sensor itself. As a result, the power, clock and GPIOs may be the same as
+those of the camera sensor. The following describes the page block map, power
+supply, clock, GPIO and power-on sequence properties of the EEPROM device.
+
+=======================================================
+Required Node Structure if probe happens from userspace
+=======================================================
+The EEPROM device is described in one level of the device node.
+
+======================================
+First Level Node - CAM EEPROM device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,eeprom".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for EEPROM HW.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+		"regulator-names".
+
+- rgltr-cntrl-support
+  Usage: required
+  Value type: <bool>
+  Definition: This property specifies if the regulator control is supported
+		e.g. rgltr-min-voltage.
+
+- rgltr-min-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain minimum voltage level for regulators
+		mentioned in regulator-names property.
+
+- rgltr-max-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain maximum voltage level for regulators
+		mentioned in regulator-names property.
+
+- rgltr-load-current
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the maximum current in microamps required for
+		the regulators mentioned in regulator-names property.
+
+- gpio-no-mux
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio mux type.
+
+- gpios
+  Usage: required
+  Value type: <phandle>
+  Definition: should specify the gpios to be used for the eeprom.
+
+- gpio-reset
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the reset gpio index.
+
+- gpio-standby
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the standby gpio index.
+
+- gpio-req-tbl-num
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+  Usage: required
+  Value type: <string>
+  Definition: should specify the gpio labels.
+
+- sensor-position
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the mount angle of the camera sensor.
+
+- cci-master
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+		sensor.
+
+- sensor-mode
+  Usage: required
+  Value type: <u32>
+  Definition: should contain sensor mode supported.
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for EEPROM HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for EEPROM HW.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: Specifies the clock levels supported by the EEPROM node.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+Example:
+
+	eeprom0: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&pm8998_l5>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		regulator-names = "cam_vdig", "cam_vio";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1200000 0>;
+		rgltr-max-voltage = <1200000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		gpios = <&msmgpio 26 0>,
+			<&msmgpio 37 0>,
+			<&msmgpio 36 0>;
+		gpio-reset = <1>;
+		gpio-standby = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET1",
+			"CAM_STANDBY";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+=======================================================
+Required Node Structure if probe happens from kernel
+=======================================================
+The EEPROM device is described in one level of the device node.
+
+======================================
+First Level Node - CAM EEPROM device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,eeprom".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- qcom,eeprom-name
+  Usage: required
+  Value type: <string>
+  Definition: Name of the EEPROM HW.
+
+- qcom,slave-addr
+  Usage: required
+  Value type: <u32>
+  Definition: Slave address of the EEPROM HW.
+
+- qcom,num-blocks
+  Usage: required
+  Value type: <u32>
+  Definition: Total number of blocks that the EEPROM contains.
+
+- qcom,pageX
+  Usage: required
+  Value type: <u32>
+  Definition: List of values specifying page size, start address,
+		address type, data, data type and delay in ms.
+		A size of 0 stands for non-paged.
+
+- qcom,pollX
+  Usage: required
+  Value type: <u32>
+  Definition: List of values specifying poll size, poll reg address,
+		address type, data, data type and delay in ms.
+		A size of 0 stands for not used.
+
+- qcom,memX
+  Usage: required
+  Value type: <u32>
+  Definition: List of values specifying memory size, start address,
+		address type, data, data type and delay in ms.
+		A size of 0 stands for not used.
+
+- qcom,saddrX
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the slave address for block X.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for EEPROM HW.
+
+- qcom,cmm-data-support
+  Usage: required
+  Value type: <u32>
+  Definition: Camera MultiModule data capability flag.
+
+- qcom,cmm-data-compressed
+  Usage: required
+  Value type: <u32>
+  Definition: Camera MultiModule data compression flag.
+
+- qcom,cmm-data-offset
+  Usage: required
+  Value type: <u32>
+  Definition: Camera MultiModule data start offset.
+
+- qcom,cmm-data-size
+  Usage: required
+  Value type: <u32>
+  Definition: Camera MultiModule data size.
+
+- qcom,cam-power-seq-type
+  Usage: required
+  Value type: <string>
+  Definition: should specify the power on sequence types.
+
+- qcom,cam-power-seq-val
+  Usage: required
+  Value type: <string>
+  Definition: should specify the power on sequence values.
+
+- qcom,cam-power-seq-cfg-val
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the power on sequence config values.
+
+- qcom,cam-power-seq-delay
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the power on sequence delay time in ms.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+		"regulator-names".
+
+- rgltr-cntrl-support
+  Usage: required
+  Value type: <bool>
+  Definition: This property specifies if the regulator control is supported
+		e.g. rgltr-min-voltage.
+
+- rgltr-min-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain minimum voltage level for regulators
+		mentioned in regulator-names property.
+
+- rgltr-max-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain maximum voltage level for regulators
+		mentioned in regulator-names property.
+
+- rgltr-load-current
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the maximum current in microamps required for
+		the regulators mentioned in regulator-names property.
+
+- gpio-no-mux
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio mux type.
+
+- gpios
+  Usage: required
+  Value type: <phandle>
+  Definition: should specify the gpios to be used for the eeprom.
+
+- gpio-reset
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the reset gpio index.
+
+- gpio-standby
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the standby gpio index.
+
+- gpio-req-tbl-num
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+  Usage: required
+  Value type: <string>
+  Definition: should specify the gpio labels.
+
+- sensor-position
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the mount angle of the camera sensor.
+
+- cci-master
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+		sensor.
+
+- sensor-mode
+  Usage: required
+  Value type: <u32>
+  Definition: should contain sensor mode supported.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: Specifies the clock levels supported by the EEPROM node.
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for EEPROM HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for EEPROM HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+Example:
+
+	eeprom0: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		qcom,eeprom-name = "msm_eeprom";
+		compatible = "qcom,eeprom";
+		qcom,slave-addr = <0x60>;
+		qcom,num-blocks = <2>;
+		qcom,page0 = <1 0x100 2 0x01 1 1>;
+		qcom,poll0 = <0 0x0 2 0 1 1>;
+		qcom,mem0 = <0 0x0 2 0 1 0>;
+		qcom,page1 = <1 0x0200 2 0x8 1 1>;
+		qcom,pageen1 = <1 0x0202 2 0x01 1 10>;
+		qcom,poll1 = <0 0x0 2 0 1 1>;
+		qcom,mem1 = <32 0x3000 2 0 1 0>;
+		qcom,saddr1 = <0x62>;
+		qcom,cmm-data-support;
+		qcom,cmm-data-compressed;
+		qcom,cmm-data-offset = <0>;
+		qcom,cmm-data-size = <0>;
+		qcom,cam-power-seq-type = "sensor_vreg",
+			"sensor_vreg", "sensor_clk",
+			"sensor_gpio", "sensor_gpio";
+		qcom,cam-power-seq-val = "cam_vdig",
+			"cam_vio", "sensor_cam_mclk",
+			"sensor_gpio_reset",
+			"sensor_gpio_standby";
+		qcom,cam-power-seq-cfg-val = <1 1 24000000 1 1>;
+		qcom,cam-power-seq-delay = <1 1 5 5 10>;
+		cam_vdig-supply = <&pm8998_l5>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		regulator-names = "cam_vdig", "cam_vio";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1200000 0>;
+		rgltr-max-voltage = <1200000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		gpios = <&msmgpio 26 0>,
+			<&msmgpio 37 0>,
+			<&msmgpio 36 0>;
+		gpio-reset = <1>;
+		gpio-standby = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET1",
+			"CAM_STANDBY";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-cntl-level = "turbo";
+		clock-names = "cam_clk";
+		clock-rates = <24000000>;
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt b/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt
new file mode 100644
index 0000000..a2ed98e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt
@@ -0,0 +1,187 @@
+* Qualcomm Technologies, Inc. MSM Camera JPEG
+
+The MSM camera JPEG devices are implemented as multiple device nodes.
+The root JPEG device node has properties defined to hint the driver
+about the number of Encoder and DMA nodes available during the
+probe sequence. Each node has multiple properties defined
+for interrupts, clocks and regulators.
+
+=======================
+Required Node Structure
+=======================
+The JPEG root interface node handles accounting for the number of Encoder
+and DMA devices present on the hardware.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-jpeg".
+
+- compat-hw-name
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,jpegenc" or "qcom,jpegdma".
+
+- num-jpeg-enc
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported Encoder HW blocks.
+
+- num-jpeg-dma
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported DMA HW blocks.
+
+Example:
+	qcom,cam-jpeg {
+		compatible = "qcom,cam-jpeg";
+		compat-hw-name = "qcom,jpegenc",
+			"qcom,jpegdma";
+		num-jpeg-enc = <1>;
+		num-jpeg-dma = <1>;
+		status = "ok";
+	};
+
+
+=======================
+Required Node Structure
+=======================
+Encoder/DMA nodes provide the JPEG driver with the device register map,
+interrupt map, clocks and regulators.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam_jpeg_enc".
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+              camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with JPEG HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for JPEG HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+              in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for JPEG HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for JPEG HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
+Examples:
+	cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_enc";
+		reg-names = "jpege_hw";
+		reg = <0xac4e000 0x4000>;
+		reg-cam-base = <0x4e000>;
+		interrupt-names = "jpeg";
+		interrupts = <0 474 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegenc_clk_src",
+			"jpegenc_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegenc_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
+	cam_jpeg_dma: qcom,jpegdma@ac52000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_dma";
+		reg-names = "jpegdma_hw";
+		reg = <0xac52000 0x4000>;
+		reg-cam-base = <0x52000>;
+		interrupt-names = "jpegdma";
+		interrupts = <0 475 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegdma_clk_src",
+			"jpegdma_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegdma_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
new file mode 100644
index 0000000..d24314a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
@@ -0,0 +1,126 @@
+* Qualcomm Technologies, Inc. MSM FLASH
+
+The MSM camera flash driver provides the definitions for enabling and
+disabling the LED torch/flash by issuing requests to PMIC/I2C/GPIO based
+hardware. It provides the functions for the client to control the flash
+hardware.
+
+=======================================================
+Required Node Structure
+=======================================================
+The Flash device is described in one level of the device node.
+
+======================================
+First Level Node - CAM FLASH device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,camera-flash".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- flash-source
+  Usage: required
+  Value type: <phandle>
+  Definition: Should contain array of phandles to Flash source nodes.
+
+- torch-source
+  Usage: required
+  Value type: <phandle>
+  Definition: Should contain array of phandles to torch source nodes.
+
+- switch-source
+  Usage: optional
+  Value type: <phandle>
+  Definition: Should contain phandle to switch source nodes.
+
+- slave-id
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain i2c slave address, device id address
+		      and expected id read value.
+
+- cci-master
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+              flash.
+
+- max-current
+  Usage: optional
+  Value type: <u32>
+  Definition: Max current in mA supported by the flash.
+
+- max-duration
+  Usage: optional
+  Value type: <u32>
+  Definition: Max duration in ms the flash can glow.
+
+- gpios
+  Usage: optional
+  Value type: <u32>
+  Definition: should specify the gpios to be used for the flash.
+
+- gpio-req-tbl-num
+  Usage: optional
+  Value type: <u32>
+  Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+  Usage: optional
+  Value type: <u32>
+  Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+  Usage: optional
+  Value type: <string>
+  Definition: should specify the gpio labels.
+
+- gpio-flash-reset
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by flash's "flash reset" pin.
+
+- gpio-flash-en
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by flash's "flash enable" pin.
+
+- gpio-flash-now
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by flash's "flash now" pin.
+
+Example:
+
+	led_flash_rear: qcom,camera-flash@0 {
+		reg = <0x00 0x00>;
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+		switch-source = <&pmi8998_switch0>;
+		qcom,slave-id = <0x00 0x00 0x0011>;
+		qcom,cci-master = <0>;
+		gpios = <&msmgpio 23 0>,
+			<&msmgpio 24 0>,
+			<&msmgpio 25 0>;
+		qcom,gpio-flash-reset = <0>;
+		qcom,gpio-flash-en = <0>;
+		qcom,gpio-flash-now = <1>;
+		qcom,gpio-req-tbl-num = <0 1>;
+		qcom,gpio-req-tbl-flags = <0 0>;
+		qcom,gpio-req-tbl-label = "FLASH_EN",
+			"FLASH_NOW";
+		qcom,max-current = <1500>;
+		qcom,max-duration = <1200>;
+	};
diff --git a/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
index 072f920..b4ce7cb 100644
--- a/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
+++ b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt
@@ -23,6 +23,8 @@
 - qcom, msm_bus,num_paths: The paths for source and destination ports
 - qcom, msm_bus,vectors: Vectors for bus topology.
 - pinctrl-names: Names for the TSIF mode configuration to specify which TSIF interface is active.
+- qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+- iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
 
 Example:
 
@@ -75,4 +77,7 @@
                         &tsif0_sync_active
                         &tsif1_signals_active
                         &tsif1_sync_active>;            /* dual-tsif-mode2 */
+
+		qcom,smmu-s1-bypass;
+		iommus = <&apps_smmu 0x20 0x0f>;
         };
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 441d771..6df71af 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -85,21 +85,6 @@
 		maximum charge current in mA for each thermal
 		level.
 
-- qcom,step-soc-thresholds
-  Usage:      optional
-  Value type: Array of <u32>
-  Definition: Array of SOC threshold values, size of 4. This should be a
-		flat array that denotes the percentage ranging from 0 to 100.
-		If the array is not present, step charging is disabled.
-
-- qcom,step-current-deltas
-  Usage:      optional
-  Value type: Array of <s32>
-  Definition: Array of delta values for charging current, size of 5, with
-		FCC as base.  This should be a flat array that denotes the
-		offset of charging current in uA, from -3100000 to 3200000.
-		If the array is not present, step charging is disabled.
-
 - io-channels
   Usage:      optional
   Value type: List of <phandle u32>
@@ -182,6 +167,18 @@
   Definition: Specifies the deglitch interval for OTG detection.
 		If the value is not present, 50 msec is used as default.
 
+- qcom,step-charging-enable
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which when present enables step-charging.
+
+- qcom,wd-bark-time-secs
+  Usage:      optional
+  Value type: <u32>
+  Definition: WD bark timeout in seconds. The possible values are
+		16, 32, 64 and 128. If not defined, it defaults to 64.
+
+
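+For reference, a minimal sketch of how the two new optional entries might
+appear inside the charger node of the example further below (the bark time
+shown is simply the documented default):
+
+	qcom,step-charging-enable;
+	qcom,wd-bark-time-secs = <64>;
+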
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
 =============================================
@@ -217,9 +214,6 @@
 
 	dpdm-supply = <&qusb_phy0>;
 
-	qcom,step-soc-thresholds = <60 70 80 90>;
-	qcom,step-current-deltas = <500000 250000 150000 0 (-150000)>;
-
 	qcom,chgr@1000 {
 		reg = <0x1000 0x100>;
 		interrupts =    <0x2 0x10 0x0 IRQ_TYPE_NONE>,
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index c116e42..4d05e50 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -329,7 +329,7 @@
  - qcom,gpio-connect                       Gpio that connects to parent
                                            interrupt controller
 
-* audio-ext-clk
+* audio-ext-clk-up
 
 Required properties:
 
@@ -2579,3 +2579,256 @@
 		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
 					  "SpkrLeft", "SpkrRight";
 	};
+
+* SDM670 ASoC Machine driver
+
+Required properties:
+- compatible : "qcom,sdm670-asoc-snd"
+- qcom,model : The user-visible name of this sound card.
+- qcom,msm-hs-micbias-type : This property is used to recognize the headset
+  micbias type, internal or external.
+- qcom,msm-mbhc-hphl-swh: This property is used to distinguish the headset HPHL
+switch type on the target. Typically the switch type will be normally open or
+normally closed; set this property to 0 for normally closed and 1 for
+normally open.
+- qcom,msm-mbhc-gnd-swh: This property is used to distinguish the headset GND
+switch type on the target. Typically the switch type will be normally open or
+normally closed; set this property to 0 for normally closed and 1 for
+normally open.
+- qcom,audio-routing : A list of the connections between audio components.
+- qcom,msm-gpios : Lists all the gpio sets that are supported.
+- qcom,pinctrl-names : Lists all the possible combinations of the gpio sets
+mentioned in qcom,msm-gpios.
+- pinctrl-names : The combinations of gpio sets from above that are supported in
+the flavor.
+- pinctrl-# : Pinctrl states as mentioned in pinctrl-names.
+
+Optional properties:
+- qcom,cdc-us-euro-gpios : GPIO on which gnd/mic swap signal is coming.
+- qcom,msm-micbias1-ext-cap : Boolean. Enable micbias1 external
+capacitor mode.
+- qcom,msm-micbias2-ext-cap : Boolean. Enable micbias2 external
+capacitor mode.
+- qcom,wsa-disable : Boolean. Disables WSA speaker dai-links from the sound node.
+- qcom,msm-spk-ext-pa : GPIO which enables the external speaker PA.
+- qcom,msm-mclk-freq : This property is used to inform the machine driver about
+the mclk frequency that needs to be configured for the internal and external PA.
+- asoc-platform: This is phandle list containing the references to platform device
+		 nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains list of platform names. The order of
+		       the platform names should match to that of the phandle order
+		       given in "asoc-platform".
+- asoc-cpu: This is phandle list containing the references to cpu dai device nodes
+	    that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains list of cpu dai names. The order of the
+		  cpu dai names should match to that of the phandle order given.
+- asoc-codec: This is phandle list containing the references to codec dai device
+	nodes that are used as part of the sound card dai-links.
+- asoc-codec-names: This property contains list of codec dai names. The order of the
+	codec dai names should match to that of the phandle order given
+	in "asoc-codec".
+- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target
+- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target
+- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device
+- qcom,cdc-pdm-gpios : phandle for pdm gpios.
+- qcom,cdc-comp-gpios : phandle for compander gpios.
+- qcom,cdc-dmic-gpios : phandle for Digital mic clk and data gpios.
+- qcom,cdc-sdw-gpios : phandle for soundwire clk and data gpios.
+- qcom,msm-mbhc-moist-cfg: This property is used to set moisture detection
+		threshold values for different codecs. The first parameter is V (voltage),
+		the second is I (current), and the third is R (resistance). Depending on
+		the codec, set the corresponding element in the array and set the others to 0.
+
+Example:
+	 sound {
+		compatible = "qcom,sdm670-asoc-snd";
+		qcom,model = "sdm670-snd-card";
+		qcom,msm-mclk-freq = <9600000>;
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,msm-hs-micbias-type = "internal";
+		qcom,msm-micbias1-ext-cap;
+		qcom,audio-routing =
+			"RX_BIAS", "MCLK",
+			"SPK_RX_BIAS", "MCLK",
+			"INT_LDO_H", "MCLK",
+			"MIC BIAS External", "Handset Mic",
+			"MIC BIAS Internal2", "Headset Mic",
+			"MIC BIAS External", "Secondary Mic",
+			"AMIC1", "MIC BIAS External",
+			"AMIC2", "MIC BIAS Internal2",
+			"AMIC3", "MIC BIAS External";
+		qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+		qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
+		qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
+		qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+		qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
+		asoc-platform = <&pcm0>, <&pcm1>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&lpa>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-voip-dsp", "msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless", "msm-pcm-afe",
+				"msm-lsm-client", "msm-pcm-routing", "msm-pcm-lpa";
+		asoc-cpu = <&dai_pri_auxpcm>, <&dai_hdmi>,
+				<&dai_mi2s0>, <&dai_mi2s1>, <&dai_mi2s2>, <&dai_mi2s3>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_3_rx>, <&sb_3_tx>, <&sb_4_rx>, <&sb_4_tx>,
+				<&bt_sco_rx>, <&bt_sco_tx>, <&int_fm_rx>, <&int_fm_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>, <&afe_proxy_tx>,
+				<&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>;
+		asoc-cpu-names = "msm-dai-q6-auxpcm.1", "msm-dai-q6-hdmi.8",
+				"msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+				"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.12288", "msm-dai-q6-dev.12289",
+				"msm-dai-q6-dev.12292", "msm-dai-q6-dev.12293",
+				"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
+				"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
+				"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
+				"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770";
+		asoc-codec = <&stub_codec>;
+		asoc-codec-names = "msm-stub-codec.1";
+		qcom,wsa-max-devs = <2>;
+		qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+				<&wsa881x_213>, <&wsa881x_214>;
+		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
+					  "SpkrRight", "SpkrLeft";
+	};
+
+* SDM670 ASoC Slimbus Machine driver
+
+Required properties:
+- compatible : "qcom,sdm670-asoc-snd-tasha" for tasha codec,
+		"qcom,sdm670-asoc-snd-tavil" for tavil codec.
+- qcom,model : The user-visible name of this sound card.
+- qcom,msm-mclk-freq : MCLK frequency value for external codec
+- qcom,msm-gpios : Lists all the gpio sets that are supported.
+- qcom,pinctrl-names : Lists all the possible combinations of the gpio sets
+mentioned in qcom,msm-gpios. For N GPIOs there are 2^N combinations, and
+this lists all of them.
+- pinctrl-names : The combinations of gpio sets from above that are supported in
+the flavor. This can sometimes be the same as qcom,pinctrl-names, i.e. with 2^N
+combinations, or may have fewer entries if some combination is not supported.
+- pinctrl-# : Pinctrl states as mentioned in pinctrl-names.
+- qcom,audio-routing : A list of the connections between audio components.
+- asoc-platform: This is phandle list containing the references to platform device
+		 nodes that are used as part of the sound card dai-links.
+- asoc-platform-names: This property contains list of platform names. The order of
+		       the platform names should match to that of the phandle order
+		       given in "asoc-platform".
+- asoc-cpu: This is phandle list containing the references to cpu dai device nodes
+	    that are used as part of the sound card dai-links.
+- asoc-cpu-names: This property contains list of cpu dai names. The order of the
+		  cpu dai names should match to that of the phandle order given
+		  in "asoc-cpu". The cpu names are in the form of "%s.%d" form,
+		  where the id (%d) field represents the back-end AFE port id that
+		  this CPU dai is associated with.
+- asoc-codec: This is phandle list containing the references to codec dai device
+	      nodes that are used as part of the sound card dai-links.
+- asoc-codec-names: This property contains list of codec dai names. The order of the
+		    codec dai names should match to that of the phandle order given
+		    in "asoc-codec".
+Optional properties:
+- qcom,cdc-us-euro-gpios : GPIO on which gnd/mic swap signal is coming.
+- clock-names : clock name defined for external clock.
+- clocks : external clock defined for codec clock.
+- qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target
+- qcom,wsa-devs : List of phandles for all possible WSA881x devices supported for the target
+- qcom,wsa-aux-dev-prefix : Name prefix with Left/Right configuration for WSA881x device
+
+Example:
+
+	sound-9335 {
+	compatible = "qcom,sdm670-asoc-snd-tasha";
+	qcom,model = "sdm670-tasha-snd-card";
+
+	qcom,audio-routing =
+		"RX_BIAS", "MCLK",
+		"LDO_H", "MCLK",
+		"AIF4 MAD", "MCLK",
+		"ultrasound amp", "LINEOUT1",
+		"ultrasound amp", "LINEOUT3",
+		"AMIC1", "MIC BIAS1 Internal1",
+		"MIC BIAS1 Internal1", "Handset Mic",
+		"AMIC2", "MIC BIAS2 External",
+		"MIC BIAS2 External", "Headset Mic",
+		"AMIC3", "MIC BIAS2 External",
+		"MIC BIAS2 External", "ANCRight Headset Mic",
+		"AMIC4", "MIC BIAS2 External",
+		"MIC BIAS2 External", "ANCLeft Headset Mic",
+		"DMIC1", "MIC BIAS1 External",
+		"MIC BIAS1 External", "Digital Mic1",
+		"DMIC2", "MIC BIAS1 External",
+		"MIC BIAS1 External", "Digital Mic2",
+		"DMIC3", "MIC BIAS3 External",
+		"MIC BIAS3 External", "Digital Mic3",
+		"DMIC4", "MIC BIAS3 External",
+		"MIC BIAS3 External", "Digital Mic4",
+		"DMIC5", "MIC BIAS4 External",
+		"MIC BIAS4 External", "Digital Mic5",
+		"DMIC6", "MIC BIAS4 External",
+		"MIC BIAS4 External", "Digital Mic6";
+
+		qcom,msm-mbhc-hphl-swh = <0>;
+		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,msm-mclk-freq = <9600000>;
+		qcom,msm-gpios =
+			"slim",
+			"us_eu_gpio";
+		qcom,pinctrl-names =
+			"all_off",
+			"slim_act",
+			"us_eu_gpio_act",
+			"slim_us_eu_gpio_act";
+		pinctrl-names =
+			"all_off",
+			"slim_act",
+			"us_eu_gpio_act",
+			"slim_us_eu_gpio_act";
+		pinctrl-0 = <&cdc_slim_lines_sus &cross_conn_det_sus>;
+		pinctrl-1 = <&cdc_slim_lines_act &cross_conn_det_sus>;
+		pinctrl-2 = <&cdc_slim_lines_sus &cross_conn_det_act>;
+		pinctrl-3 = <&cdc_slim_lines_act &cross_conn_det_act>;
+		qcom,cdc-us-euro-gpios = <&msm_gpio 63 0>;
+		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+				<&loopback>, <&compress>, <&hostless>,
+				<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>;
+		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+				"msm-pcm-dsp.2", "msm-voip-dsp",
+				"msm-pcm-voice", "msm-pcm-loopback",
+				"msm-compress-dsp", "msm-pcm-hostless",
+				"msm-pcm-afe", "msm-lsm-client",
+				"msm-pcm-routing", "msm-cpe-lsm",
+				"msm-compr-dsp";
+		asoc-cpu = <&dai_hdmi>,
+				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+				<&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+				<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+				<&afe_proxy_tx>, <&incall_record_rx>,
+				<&incall_record_tx>, <&incall_music_rx>,
+				<&incall_music_2_rx>, <&sb_5_rx>;
+		asoc-cpu-names = "msm-dai-q6-hdmi.8",
+				"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+				"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+				"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+				"msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+				"msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+				"msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+				"msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+				"msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+				"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+				"msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394";
+		asoc-codec = <&stub_codec>;
+		asoc-codec-names = "msm-stub-codec.1";
+		qcom,wsa-max-devs = <2>;
+		qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
+				<&wsa881x_213>, <&wsa881x_214>;
+		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
+					  "SpkrRight", "SpkrLeft";
+	};
diff --git a/Makefile b/Makefile
index 04b3557..5894331 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 38
+SUBLEVEL = 40
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -565,7 +565,7 @@
 
 # Objects we will link into vmlinux / subdirs we need to visit
 init-y		:= init/
-drivers-y	:= drivers/ sound/ firmware/
+drivers-y	:= drivers/ sound/ firmware/ techpack/
 net-y		:= net/
 libs-y		:= lib/
 core-y		:= usr/
@@ -633,6 +633,9 @@
 
 KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
+KBUILD_CFLAGS	+= $(call cc-disable-warning, format-truncation)
+KBUILD_CFLAGS	+= $(call cc-disable-warning, format-overflow)
+KBUILD_CFLAGS	+= $(call cc-disable-warning, int-in-bool-context)
 
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
 KBUILD_CFLAGS	+= $(call cc-option,-ffunction-sections,)
@@ -1152,6 +1155,7 @@
 	  $(error Headers not exportable for the $(SRCARCH) architecture))
 	$(Q)$(MAKE) $(hdr-inst)=include/uapi
 	$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/asm $(hdr-dst)
+	$(Q)$(MAKE) $(hdr-inst)=techpack
 
 PHONY += headers_check_all
 headers_check_all: headers_install_all
@@ -1161,6 +1165,7 @@
 headers_check: headers_install
 	$(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
 	$(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/asm $(hdr-dst) HDRCHECK=1
+	$(Q)$(MAKE) $(hdr-inst)=techpack HDRCHECK=1
 
 # ---------------------------------------------------------------------------
 # Kernel selftest
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index d2315ff..f13ae15 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -112,12 +112,8 @@
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
+/* This is the base location for PIE (ET_DYN with INTERP) loads. */
+#define ELF_ET_DYN_BASE		0x400000UL
 
 /* When the program starts, a1 contains a pointer to a function to be 
    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 92dc1e6..b97f1de 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -28,6 +28,17 @@
 DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
 endif
 
+# Add RTIC DTB to the DTB list if RTIC MPGen is enabled
+ifdef RTIC_MPGEN
+DTB_OBJS += rtic_mp.dtb
+endif
+
+rtic_mp.dtb: vmlinux FORCE
+	$(RTIC_MPGEN) --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
+	--binpath="" --vmlinux="vmlinux" --config=${KCONFIG_CONFIG} \
+	--cc="${CC}" --dts=rtic_mp.dts && \
+	$(DTC) -O dtb -o rtic_mp.dtb -b 0 $(DTC_FLAGS) rtic_mp.dts
+
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
 
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index e9bd587..49a5d8c 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -75,14 +75,10 @@
 
 	timer {
 		compatible = "arm,armv8-timer";
-		interrupts = <GIC_PPI 13
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 14
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 11
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>,
-			     <GIC_PPI 10
-			(GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
 	soc {
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
index 44b817e..bffcdf5 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -31,6 +31,8 @@
 		qcom,mdss-dsi-h-right-border = <0>;
 		qcom,mdss-dsi-v-top-border = <0>;
 		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
 		qcom,mdss-dsi-bpp = <24>;
 		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
 		qcom,mdss-dsi-underflow-color = <0xff>;
@@ -40,6 +42,7 @@
 			17000 15500 30000 8000 3000>;
 		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
 		qcom,mdss-dsi-panel-blackness-level = <3230>;
+		qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
 		qcom,mdss-dsi-on-command = [
 			/* CMD2_P0 */
 			15 01 00 00 00 00 02 ff 20
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
index 647ea4e..f860ea3 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -35,6 +35,7 @@
 		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
 		qcom,mdss-dsi-underflow-color = <0xff>;
 		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
 		qcom,mdss-dsi-h-sync-pulse = <0>;
 		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
 		qcom,mdss-dsi-bllp-eof-power-mode;
@@ -45,6 +46,8 @@
 		qcom,mdss-dsi-lane-3-state;
 		qcom,adjust-timer-wakeup-ms = <1>;
 		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
 		qcom,mdss-dsi-bl-max-level = <4095>;
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
index 346a8b4..23a96a4 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -202,6 +202,8 @@
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
 		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
 		qcom,mdss-dsi-tx-eot-append;
 
 		qcom,config-select = <&dsi_dual_nt35597_truly_video_config0>;
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
index c2b054a..6f66e8e 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
@@ -37,6 +37,8 @@
 		qcom,mdss-dsi-underflow-color = <0xff>;
 		qcom,mdss-dsi-border-color = <0>;
 		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <64>;
+		qcom,mdss-pan-physical-height-dimension = <117>;
 		qcom,mdss-dsi-on-command = [
 			15 01 00 00 00 00 02 bb 10
 			15 01 00 00 00 00 02 b0 03
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 6534cdc..4562f8c 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -41,6 +41,8 @@
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
 		qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
+		qcom,mdss-pan-physical-width-dimension = <71>;
+		qcom,mdss-pan-physical-height-dimension = <129>;
 		qcom,mdss-dsi-te-pin-select = <1>;
 		qcom,mdss-dsi-wr-mem-start = <0x2c>;
 		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
@@ -49,6 +51,7 @@
 		qcom,mdss-dsi-te-using-te-pin;
 		qcom,dcs-cmd-by-left;
 		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-panel-jitter = <0x8 0xa>;
 
 		qcom,adjust-timer-wakeup-ms = <1>;
 		qcom,mdss-dsi-on-command = [
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index b314e99..7954856 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -41,6 +41,8 @@
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
 		qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
+		qcom,mdss-pan-physical-width-dimension = <71>;
+		qcom,mdss-pan-physical-height-dimension = <129>;
 		qcom,mdss-dsi-tx-eot-append;
 
 		qcom,adjust-timer-wakeup-ms = <1>;
diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi
index 0f18ba5..9cd117c 100644
--- a/arch/arm64/boot/dts/qcom/pm660l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi
@@ -250,9 +250,8 @@
 				<0xd900 0x100>;
 			reg-names = "qpnp-wled-ctrl-base",
 					"qpnp-wled-sink-base";
-			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
-					<0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
-			interrupt-names = "ovp-irq", "sc-irq";
+			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "ovp-irq";
 			linux,name = "wled";
 			linux,default-trigger = "bkl-trigger";
 			qcom,fdbk-output = "auto";
@@ -268,7 +267,6 @@
 			qcom,fs-curr-ua = <25000>;
 			qcom,cons-sync-write-delay-us = <1000>;
 			qcom,led-strings-list = [00 01 02];
-			qcom,en-ext-pfet-sc-pro;
 			qcom,loop-auto-gm-en;
 			qcom,pmic-revid = <&pm660l_revid>;
 			status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index dcc5d1b..46d4aa6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1146,5 +1146,103 @@
 				};
 			};
 		};
+		/* SDC pin type */
+		sdc1_clk_on: sdc1_clk_on {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc1_clk_off: sdc1_clk_off {
+			config {
+				pins = "sdc1_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_cmd_on: sdc1_cmd_on {
+			config {
+				pins = "sdc1_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_cmd_off: sdc1_cmd_off {
+			config {
+				pins = "sdc1_cmd";
+				num-grp-pins = <1>;
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc1_data_on: sdc1_data_on {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc1_data_off: sdc1_data_off {
+			config {
+				pins = "sdc1_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_clk_on: sdc2_clk_on {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <16>;	/* 16 MA */
+			};
+		};
+
+		sdc2_clk_off: sdc2_clk_off {
+			config {
+				pins = "sdc2_clk";
+				bias-disable;		/* NO pull */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_cmd_on: sdc2_cmd_on {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc2_cmd_off: sdc2_cmd_off {
+			config {
+				pins = "sdc2_cmd";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
+		sdc2_data_on: sdc2_data_on {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <10>;	/* 10 MA */
+			};
+		};
+
+		sdc2_data_off: sdc2_data_off {
+			config {
+				pins = "sdc2_data";
+				bias-pull-up;		/* pull up */
+				drive-strength = <2>;	/* 2 MA */
+			};
+		};
+
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
index 17b90c7..f2f41fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
@@ -77,3 +77,24 @@
 	spm-level = <0>;
 	status = "ok";
 };
+
+&sdhc_1 {
+	vdd-supply = <&pm660l_l4>;
+	qcom,vdd-voltage-level = <2960000 2960000>;
+	qcom,vdd-current-level = <200 570000>;
+
+	vdd-io-supply = <&pm660_l8>;
+	qcom,vdd-io-always-on;
+	qcom,vdd-io-lpm-sup;
+	qcom,vdd-io-voltage-level = <1800000 1800000>;
+	qcom,vdd-io-current-level = <200 325000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+	pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000 50000000>;
+	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
index 6dd1b749..f3e5ddb 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi
@@ -25,8 +25,8 @@
 		compatible = "qcom,smp2p";
 		reg = <0x1799000c 0x4>;
 		qcom,remote-pid = <2>;
-		qcom,irq-bitmask = <0x200>;
-		interrupts = <GIC_SPI 157 IRQ_TYPE_EDGE_RISING>;
+		qcom,irq-bitmask = <0x4000000>;
+		interrupts = <GIC_SPI 172 IRQ_TYPE_EDGE_RISING>;
 	};
 
 	qcom,smp2p-cdsp@1799000c {
@@ -245,4 +245,27 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 	};
+
+	/* ssr - inbound entry from cdsp */
+	smp2pgpio_ssr_smp2p_5_in: qcom,smp2pgpio-ssr-smp2p-5-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <5>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to cdsp */
+	smp2pgpio_ssr_smp2p_5_out: qcom,smp2pgpio-ssr-smp2p-5-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <5>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 21c40fc..3bef777 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -30,6 +30,7 @@
 
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
+		sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
 	};
 
 	aliases {
@@ -825,6 +826,12 @@
 		reg-names = "pshold-base", "tcsr-boot-misc-detect";
 	};
 
+	aop-msg-client {
+		compatible = "qcom,debugfs-qmp-client";
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "aop";
+	};
+
 	clock_rpmh: qcom,rpmhclk {
 		compatible = "qcom,dummycc";
 		clock-output-names = "rpmh_clocks";
@@ -1086,7 +1093,7 @@
 		qcom,mpu-enabled;
 	};
 
-	qmp_aop: mailbox@1799000c {
+	qmp_aop: qcom,qmp-aop@c300000 {
 		compatible = "qcom,qmp-mbox";
 		label = "aop";
 		reg = <0xc300000 0x100000>,
@@ -1094,6 +1101,7 @@
 		reg-names = "msgram", "irq-reg-base";
 		qcom,irq-mask = <0x1>;
 		interrupts = <0 389 1>;
+		priority = <0>;
 		mbox-desc-offset = <0x0>;
 		#mbox-cells = <1>;
 	};
@@ -1113,8 +1121,8 @@
 		reg = <0x86000000 0x200000>,
 			<0x1799000c 0x4>;
 		reg-names = "smem", "irq-reg-base";
-		qcom,irq-mask = <0x100>;
-		interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+		qcom,irq-mask = <0x1000000>;
+		interrupts = <GIC_SPI 170 IRQ_TYPE_EDGE_RISING>;
 		label = "lpass";
 		qcom,qos-config = <&glink_qos_adsp>;
 		qcom,ramp-time = <0xaf>;
@@ -1368,6 +1376,19 @@
 				<CONTROL_TCS   1>;
 	};
 
+	disp_rsc: mailbox@af20000 {
+		compatible = "qcom,tcs-drv";
+		label = "display_rsc";
+		reg = <0xaf20000 0x100>, <0xaf21c00 0x3000>;
+		interrupts = <0 129 0>;
+		#mbox-cells = <1>;
+		qcom,drv-id = <0>;
+		qcom,tcs-config = <SLEEP_TCS 1>,
+				<WAKE_TCS    1>,
+				<ACTIVE_TCS  0>,
+				<CONTROL_TCS 1>;
+	};
+
 	system_pm {
 		compatible = "qcom,system-pm";
 		mboxes = <&apps_rsc 0>;
@@ -1715,6 +1736,85 @@
 			memory-region = <&pil_mba_mem>;
 		};
 	};
+
+	qcom,venus@aae0000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0xaae0000 0x4000>;
+
+		vdd-supply = <&venus_gdsc>;
+		qcom,proxy-reg-names = "vdd";
+
+		clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
+			 <&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
+			 <&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>;
+		clock-names = "core_clk", "iface_clk", "bus_clk";
+		qcom,proxy-clock-names = "core_clk", "iface_clk", "bus_clk";
+
+		qcom,pas-id = <9>;
+		qcom,msm-bus,name = "pil-venus";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<63 512 0 0>,
+			<63 512 0 304000>;
+		qcom,proxy-timeout-ms = <100>;
+		qcom,firmware-name = "venus";
+		memory-region = <&pil_video_mem>;
+		status = "ok";
+	};
+
+	qcom,turing@8300000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x8300000 0x100000>;
+		interrupts = <0 578 1>;
+
+		vdd_cx-supply = <&pm660l_s3_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <18>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <601>;
+		qcom,sysmon-id = <7>;
+		qcom,ssctl-instance-id = <0x17>;
+		qcom,firmware-name = "cdsp";
+		memory-region = <&pil_cdsp_mem>;
+
+		/* GPIO inputs from turing */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_5_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_5_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_5_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_5_in 3 0>;
+
+		/* GPIO output to turing */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
+		status = "ok";
+	};
+
+	sdhc_1: sdhci@7c4000 {
+		compatible = "qcom,sdhci-msm-v5";
+		reg = <0x7C4000 0x1000>, <0x7C5000 0x1000>;
+		reg-names = "hc_mem", "cmdq_mem";
+
+		interrupts = <0 641 0>, <0 644 0>;
+		interrupt-names = "hc_irq", "pwr_irq";
+
+		qcom,bus-width = <8>;
+		qcom,large-address-bus;
+
+		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
+			<&clock_gcc GCC_SDCC1_APPS_CLK>;
+		clock-names = "iface_clk", "core_clk";
+
+		qcom,nonremovable;
+
+		qcom,scaling-lower-bus-speed-mode = "DDR52";
+		status = "disabled";
+	};
 };
 
 #include "sdm670-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index ab6ba18..f18137c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -245,7 +245,7 @@
 		compatible = "qcom,cam-sensor";
 		reg = <0x0>;
 		csiphy-sd-index = <0>;
-		sensor-position-roll = <90>;
+		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <180>;
 		led-flash-src = <&led_flash_rear>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index cbd495a..3fa0ab3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -36,9 +36,7 @@
 			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
 			<&clock_camcc CAM_CC_CSIPHY0_CLK>,
 			<&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>,
-			<&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>,
-			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
-			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>;
+			<&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>;
 		clock-names = "camnoc_axi_clk",
 			"soc_ahb_clk",
 			"slow_ahb_src_clk",
@@ -46,12 +44,10 @@
 			"cphy_rx_clk_src",
 			"csiphy0_clk",
 			"csi0phytimer_clk_src",
-			"csi0phytimer_clk",
-			"ife_0_csid_clk",
-			"ife_0_csid_clk_src";
+			"csi0phytimer_clk";
 		clock-cntl-level = "turbo";
 		clock-rates =
-			<0 0 0 0 320000000 0 269333333 0 0 384000000>;
+			<0 0 0 0 320000000 0 269333333 0>;
 		status = "ok";
 	};
 
@@ -74,9 +70,7 @@
 			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
 			<&clock_camcc CAM_CC_CSIPHY1_CLK>,
 			<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>,
-			<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>,
-			<&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
-			<&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>;
+			<&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>;
 		clock-names = "camnoc_axi_clk",
 			"soc_ahb_clk",
 			"slow_ahb_src_clk",
@@ -84,12 +78,10 @@
 			"cphy_rx_clk_src",
 			"csiphy1_clk",
 			"csi1phytimer_clk_src",
-			"csi1phytimer_clk",
-			"ife_1_csid_clk",
-			"ife_1_csid_clk_src";
+			"csi1phytimer_clk";
 		clock-cntl-level = "turbo";
 		clock-rates =
-			<0 0 0 0 320000000 0 269333333 0 0 384000000>;
+			<0 0 0 0 320000000 0 269333333 0>;
 
 		status = "ok";
 	};
@@ -113,9 +105,7 @@
 			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
 			<&clock_camcc CAM_CC_CSIPHY2_CLK>,
 			<&clock_camcc CAM_CC_CSI2PHYTIMER_CLK_SRC>,
-			<&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>,
-			<&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>,
-			<&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>;
+			<&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>;
 		clock-names = "camnoc_axi_clk",
 			"soc_ahb_clk",
 			"slow_ahb_src_clk",
@@ -123,12 +113,10 @@
 			"cphy_rx_clk_src",
 			"csiphy2_clk",
 			"csi2phytimer_clk_src",
-			"csi2phytimer_clk",
-			"ife_lite_csid_clk",
-			"ife_lite_csid_clk_src";
+			"csi2phytimer_clk";
 		clock-cntl-level = "turbo";
 		clock-rates =
-			<0 0 0 0 320000000 0 269333333 0 0 384000000>;
+			<0 0 0 0 320000000 0 269333333 0>;
 		status = "ok";
 	};
 
@@ -258,6 +246,23 @@
 			};
 		};
 
+		msm_cam_smmu_jpeg {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1060 0x8>,
+				<&apps_smmu 0x1068 0x8>;
+			label = "jpeg";
+			jpeg_iova_mem_map: iova-mem-map {
+				/* IO region is approximately 3.4 GB */
+				iova-mem-region-io {
+					iova-region-name = "io";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0xd8c00000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
 		msm_cam_icp_fw {
 			compatible = "qcom,msm-cam-smmu-fw-dev";
 			label="icp";
@@ -410,7 +415,7 @@
 			"csid0", "csid1", "csid2",
 			"ife0", "ife1", "ife2", "ipe0",
 			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
-			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
 		client-axi-port-names =
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
 			"cam_hf_1", "cam_hf_2", "cam_hf_2",
@@ -503,8 +508,8 @@
 		label = "cam-cdm-intf";
 		num-hw-cdm = <1>;
 		cdm-client-names = "vfe",
-			"jpeg-dma",
-			"jpeg",
+			"jpegdma",
+			"jpegenc",
 			"fd";
 		status = "ok";
 	};
@@ -887,4 +892,76 @@
 		clock-cntl-level = "turbo";
 		status = "ok";
 	};
+
+	qcom,cam-jpeg {
+		compatible = "qcom,cam-jpeg";
+		compat-hw-name = "qcom,jpegenc",
+			"qcom,jpegdma";
+		num-jpeg-enc = <1>;
+		num-jpeg-dma = <1>;
+		status = "ok";
+	};
+
+	cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_enc";
+		reg-names = "jpege_hw";
+		reg = <0xac4e000 0x4000>;
+		reg-cam-base = <0x4e000>;
+		interrupt-names = "jpeg";
+		interrupts = <0 474 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegenc_clk_src",
+			"jpegenc_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegenc_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
+	cam_jpeg_dma: qcom,jpegdma@ac52000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_dma";
+		reg-names = "jpegdma_hw";
+		reg = <0xac52000 0x4000>;
+		reg-cam-base = <0x52000>;
+		interrupt-names = "jpegdma";
+		interrupts = <0 475 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegdma_clk_src",
+			"jpegdma_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegdma_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index c4ec012..8fca29c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -169,10 +169,6 @@
 	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
 	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
 
-	qcom,clk-rates = <400000 20000000 25000000
-				50000000 100000000 200000000>;
-	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
 	extcon = <&extcon_storage_cd>;
 
 	status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 1453975..29d80a7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -245,10 +245,6 @@
 	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
 	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
 
-	qcom,clk-rates = <400000 20000000 25000000
-				50000000 100000000 200000000>;
-	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
 	extcon = <&extcon_storage_cd>;
 
 	status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 2a7b6d1..04f67cd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -2667,7 +2667,7 @@
 
 			config {
 				pins = "gpio80","gpio79";
-				bias-disable; /* No PULL */
+				bias-pull-down; /* PULL DOWN */
 				drive-strength = <2>; /* 2 MA */
 			};
 		};
@@ -2723,7 +2723,7 @@
 
 			config {
 				pins = "gpio28";
-				bias-disable; /* No PULL */
+				bias-pull-down; /* PULL DOWN */
 				drive-strength = <2>; /* 2 MA */
 			};
 		};
@@ -2780,7 +2780,7 @@
 			};
 			config {
 				pins = "gpio9","gpio8";
-				bias-disable; /* No PULL */
+				bias-pull-down; /* PULL DOWN */
 				drive-strength = <2>; /* 2 MA */
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 9cf18b7..6bdc149 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -170,10 +170,6 @@
 	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
 	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
 
-	qcom,clk-rates = <400000 20000000 25000000
-				50000000 100000000 200000000>;
-	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
-
 	extcon = <&extcon_storage_cd>;
 
 	status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index b826768..03b9e06 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -453,7 +453,7 @@
 	};
 
 	apc1_cpr: cprh-ctrl@17db0000 {
-		compatible = "qcom,cprh-sdm845-kbss-regulator";
+		compatible = "qcom,cprh-sdm845-v1-kbss-regulator";
 		reg =	<0x17db0000 0x4000>,
 			<0x00784000 0x1000>,
 			<0x17830000 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index ec048ca..86e97f8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -64,7 +64,7 @@
 				MSM_BUS_SLAVE_EBI_CH0 240000 800000>,
 			<MSM_BUS_MASTER_USB3
 				MSM_BUS_SLAVE_IPA_CFG 0 2400>,
-			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 80000>;
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3 0 40000>;
 
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
@@ -369,7 +369,7 @@
 			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 0>,
 			<MSM_BUS_MASTER_USB3_1
 				MSM_BUS_SLAVE_EBI_CH0 240000 800000>,
-			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 80000>;
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 40000>;
 
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index f5848c1..b8aeb87 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -17,6 +17,427 @@
 	qcom,msm-id = <321 0x20000>;
 };
 
+&sdhc_2 {
+	qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+				   100000000 200000000 4294967295>;
+	qcom,clk-rates = <400000 20000000 25000000 50000000
+			  100000000 200000000>;
+	qcom,devfreq,freq-table = <50000000 200000000>;
+	/delete-property/ qcom,sdr104-wa;
+};
+
+/delete-node/ &apc0_cpr;
+/delete-node/ &apc1_cpr;
+
+&soc {
+	/* CPR controller regulators */
+	apc0_cpr: cprh-ctrl@17dc0000 {
+		compatible = "qcom,cprh-sdm845-v2-kbss-regulator";
+		reg =	<0x17dc0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17840000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc0";
+		qcom,cpr-controller-id = <0>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <11>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <20>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+		qcom,cpr-reset-step-quot-loop-en;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x3B803B8>;
+
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17dc3a84 0x17dc3a88 0x17840c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_SILVER_CPRH_STATUS_0",
+			"APSS_SILVER_CPRH_STATUS_1",
+			"SILVER_SAW4_PMIC_STS";
+
+		qcom,cpr-aging-ref-voltage = <952000>;
+		vdd-supply = <&pm8998_s13>;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <0>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_pwrcl_vreg: regulator {
+				regulator-name = "apc0_pwrcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <18>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <18 18>;
+				qcom,cpr-corners = <18>;
+
+				qcom,cpr-corner-fmax-map = <6 12 15 18>;
+
+				qcom,cpr-voltage-ceiling =
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 884000  952000  952000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  40000  40000>;
+
+				qcom,corner-frequencies =
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1228800000
+					1324800000 1420800000 1516800000
+					1612800000 1689600000 1766400000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<2594 2795 2576 2761 2469 2673 2198
+					 2553 3188 3255 3191 2962 3055 2984
+					 2043 2947>,
+					<2594 2795 2576 2761 2469 2673 2198
+					 2553 3188 3255 3191 2962 3055 2984
+					 2043 2947>,
+					<2259 2389 2387 2531 2294 2464 2218
+					 2476 2525 2855 2817 2836 2740 2490
+					 1950 2632>,
+					<2259 2389 2387 2531 2294 2464 2218
+					 2476 2525 2855 2817 2836 2740 2490
+					 1950 2632>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <18>;
+				qcom,cpr-aging-ro-scaling-factor = <1620>;
+				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+
+		thread@1 {
+			qcom,cpr-thread-id = <1>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <0>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_l3_vreg: regulator {
+				regulator-name = "apc0_l3_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <14>;
+
+				qcom,cpr-fuse-corners = <4>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <14 14>;
+				qcom,cpr-corners = <14>;
+
+				qcom,cpr-corner-fmax-map = <4 8 11 14>;
+
+				qcom,cpr-voltage-ceiling =
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  884000  884000  952000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  40000>;
+
+				qcom,corner-frequencies =
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 844800000  940800000 1036800000
+					1132800000 1209600000 1305600000
+					1401600000 1478400000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2439 2577 2552 2667 2461 2577 2394
+					 2536 2132 2307 2191 2903 2838 2912
+					 2501 2095>,
+					<2439 2577 2552 2667 2461 2577 2394
+					 2536 2132 2307 2191 2903 2838 2912
+					 2501 2095>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <14>;
+				qcom,cpr-aging-ro-scaling-factor = <1620>;
+				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+
+	apc1_cpr: cprh-ctrl@17db0000 {
+		compatible = "qcom,cprh-sdm845-v2-kbss-regulator";
+		reg =	<0x17db0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17830000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc1";
+		qcom,cpr-controller-id = <1>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <9>;
+		qcom,cpr-step-quot-init-max = <14>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <20>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+		qcom,cpr-reset-step-quot-loop-en;
+
+		qcom,apm-threshold-voltage = <800000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,mem-acc-threshold-voltage = <852000>;
+		qcom,mem-acc-crossover-voltage = <852000>;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x4700470>;
+
+		qcom,cpr-enable;
+		qcom,cpr-hw-closed-loop;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17db3a84 0x17830c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
+
+		qcom,cpr-aging-ref-voltage = <1136000>;
+		vdd-supply = <&pm8998_s12>;
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <0>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc1_perfcl_vreg: regulator {
+				regulator-name = "apc1_perfcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <33>;
+
+				qcom,cpr-fuse-corners = <5>;
+				qcom,cpr-fuse-combos = <16>;
+				qcom,cpr-speed-bins = <2>;
+				qcom,cpr-speed-bin-corners = <28 31>;
+				qcom,cpr-corners =
+					/* Speed bin 0 */
+					<28 28 28 28 28 28 28 28>,
+					/* Speed bin 1 */
+					<31 31 31 31 31 31 31 31>;
+
+				qcom,cpr-corner-fmax-map =
+					/* Speed bin 0 */
+					<7 14 22 27 28>,
+					/* Speed bin 1 */
+					<7 14 22 27 31>;
+
+				qcom,cpr-voltage-ceiling =
+					/* Speed bin 0 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  884000  884000
+					 884000  884000 1104000 1104000 1104000
+					1104000 1136000 1136000>,
+					/* Speed bin 1 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  884000  884000
+					 884000  884000 1104000 1104000 1104000
+					1104000 1136000 1136000 1136000 1136000
+					1136000>;
+
+				qcom,cpr-voltage-floor =
+					/* Speed bin 0 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000>,
+					/* Speed bin 1 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					/* Speed bin 0 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000>,
+					/* Speed bin 1 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000>;
+
+				qcom,corner-frequencies =
+					/* Speed bin 0 */
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1459200000
+					1536000000 1612800000 1689600000
+					1766400000 1843200000 1920000000
+					1996800000 2092800000 2169600000
+					2246400000 2323200000 2400000000
+					2400000000>,
+					/* Speed bin 1 */
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1459200000
+					1536000000 1612800000 1689600000
+					1766400000 1843200000 1920000000
+					1996800000 2092800000 2169600000
+					2246400000 2323200000 2400000000
+					2476800000 2553600000 2630400000
+					2707200000>;
+
+				qcom,cpr-ro-scaling-factor =
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2086 2208 2273 2408 2203 2327 2213
+					 2340 1755 2039 2049 2474 2437 2618
+					 2003 1675>,
+					<2086 2208 2273 2408 2203 2327 2213
+					 2340 1755 2039 2049 2474 2437 2618
+					 2003 1675>,
+					<2086 2208 2273 2408 2203 2327 2213
+					 2340 1755 2039 2049 2474 2437 2618
+					 2003 1675>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000 100000>;
+
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+				qcom,cpr-aging-max-voltage-adjustment = <15000>;
+				qcom,cpr-aging-ref-corner = <27 31>;
+				qcom,cpr-aging-ro-scaling-factor = <1700>;
+				qcom,allow-aging-voltage-adjustment =
+					/* Speed bin 0 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>;
+				qcom,allow-aging-open-loop-voltage-adjustment =
+					<1>;
+			};
+		};
+	};
+};
+
+&clock_cpucc {
+	vdd-l3-supply = <&apc0_l3_vreg>;
+	vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+};
+
 &clock_gcc {
 	compatible = "qcom,gcc-sdm845-v2";
 };
@@ -37,3 +458,9 @@
 	qcom,allowed-clock-rates = <100000000 200000000 330000000
 		404000000 444000000 533000000>;
 };
+
+&spss_utils {
+	qcom,spss-dev-firmware-name  = "spss2d";	/* 8 chars max */
+	qcom,spss-test-firmware-name = "spss2t";	/* 8 chars max */
+	qcom,spss-prod-firmware-name = "spss2p";	/* 8 chars max */
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 11d553d..f408719 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -960,7 +960,6 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
 		governor = "performance";
-		qcom,prepare-clk;
 	};
 
 	l3_cpu4: qcom,l3-cpu4 {
@@ -968,7 +967,6 @@
 		clock-names = "devfreq_clk";
 		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
 		governor = "performance";
-		qcom,prepare-clk;
 	};
 
 	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
@@ -1676,13 +1674,18 @@
 			<81 512 1338562 4096000>,
 			<1 608 1338562 4096000>;
 		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
-			100000000 200000000 4294967295>;
+			100750000 200000000 4294967295>;
 
 		qcom,sdr104-wa;
 
 		qcom,restore-after-cx-collapse;
 
-		qcom,devfreq,freq-table = <50000000 200000000>;
+		qcom,clk-rates = <400000 20000000 25000000
+					50000000 100000000 201500000>;
+		qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50",
+				      "SDR104";
+
+		qcom,devfreq,freq-table = <50000000 201500000>;
 		clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
 			<&clock_gcc GCC_SDCC2_APPS_CLK>;
 		clock-names = "iface_clk", "core_clk";
@@ -2700,6 +2703,9 @@
 			 <&clock_gcc GCC_CE1_AXI_CLK>;
 		qcom,ce-opp-freq = <171430000>;
 		qcom,request-bw-before-clk;
+		qcom,smmu-s1-bypass;
+		iommus = <&apps_smmu 0x702 0x1>,
+			 <&apps_smmu 0x712 0x1>;
 	};
 
 	qcom_crypto: qcrypto@1de0000 {
@@ -2734,6 +2740,9 @@
 		qcom,use-sw-ahash-algo;
 		qcom,use-sw-aead-algo;
 		qcom,use-sw-hmac-algo;
+		qcom,smmu-s1-bypass;
+		iommus = <&apps_smmu 0x704 0x3>,
+			 <&apps_smmu 0x714 0x3>;
 	};
 
 	qcom,msm_gsi {
@@ -3614,6 +3623,9 @@
 			&tsif0_sync_active
 			&tsif1_signals_active
 			&tsif1_sync_active>;		/* dual-tsif-mode2 */
+
+		qcom,smmu-s1-bypass;
+		iommus = <&apps_smmu 0x20 0x0f>;
 	};
 };
 
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index b8f115b..f5c62aa 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -512,6 +512,7 @@
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 5872f88..f1dcb9d 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -403,6 +403,7 @@
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -513,6 +514,7 @@
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
@@ -528,6 +530,7 @@
 CONFIG_APSS_CORE_EA=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 82455e5..3aefe13 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -407,6 +407,7 @@
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
 CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -530,6 +531,7 @@
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
@@ -547,6 +549,7 @@
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a55384f..afa23b0 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -113,12 +113,11 @@
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 /*
- * This is the location that an ET_DYN program is loaded if exec'ed.  Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader.  We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
  */
-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE		0x100000000UL
 
 #ifndef __ASSEMBLY__
 
@@ -169,7 +168,8 @@
 
 #ifdef CONFIG_COMPAT
 
-#define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE		0x000400000UL
 
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG		18
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index de781cf..da80878 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -74,10 +74,7 @@
 			return __microMIPS_compute_return_epc(regs);
 		if (cpu_has_mips16)
 			return __MIPS16e_compute_return_epc(regs);
-		return regs->cp0_epc;
-	}
-
-	if (!delay_slot(regs)) {
+	} else if (!delay_slot(regs)) {
 		regs->cp0_epc += 4;
 		return 0;
 	}
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index c86b66b..c3f2fb3 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -399,7 +399,7 @@
  *
  * @regs:	Pointer to pt_regs
  * @insn:	branch instruction to decode
- * @returns:	-EFAULT on error and forces SIGBUS, and on success
+ * @returns:	-EFAULT on error and forces SIGILL, and on success
  *		returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *		evaluating the branch.
  *
@@ -431,7 +431,7 @@
 			/* Fall through */
 		case jr_op:
 			if (NO_R6EMU && insn.r_format.func == jr_op)
-				goto sigill_r6;
+				goto sigill_r2r6;
 			regs->cp0_epc = regs->regs[insn.r_format.rs];
 			break;
 		}
@@ -446,7 +446,7 @@
 		switch (insn.i_format.rt) {
 		case bltzl_op:
 			if (NO_R6EMU)
-				goto sigill_r6;
+				goto sigill_r2r6;
 		case bltz_op:
 			if ((long)regs->regs[insn.i_format.rs] < 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -459,7 +459,7 @@
 
 		case bgezl_op:
 			if (NO_R6EMU)
-				goto sigill_r6;
+				goto sigill_r2r6;
 		case bgez_op:
 			if ((long)regs->regs[insn.i_format.rs] >= 0) {
 				epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -473,10 +473,8 @@
 		case bltzal_op:
 		case bltzall_op:
 			if (NO_R6EMU && (insn.i_format.rs ||
-			    insn.i_format.rt == bltzall_op)) {
-				ret = -SIGILL;
-				break;
-			}
+			    insn.i_format.rt == bltzall_op))
+				goto sigill_r2r6;
 			regs->regs[31] = epc + 8;
 			/*
 			 * OK we are here either because we hit a NAL
@@ -507,10 +505,8 @@
 		case bgezal_op:
 		case bgezall_op:
 			if (NO_R6EMU && (insn.i_format.rs ||
-			    insn.i_format.rt == bgezall_op)) {
-				ret = -SIGILL;
-				break;
-			}
+			    insn.i_format.rt == bgezall_op))
+				goto sigill_r2r6;
 			regs->regs[31] = epc + 8;
 			/*
 			 * OK we are here either because we hit a BAL
@@ -556,6 +552,7 @@
 	/*
 	 * These are unconditional and in j_format.
 	 */
+	case jalx_op:
 	case jal_op:
 		regs->regs[31] = regs->cp0_epc + 8;
 	case j_op:
@@ -573,7 +570,7 @@
 	 */
 	case beql_op:
 		if (NO_R6EMU)
-			goto sigill_r6;
+			goto sigill_r2r6;
 	case beq_op:
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt]) {
@@ -587,7 +584,7 @@
 
 	case bnel_op:
 		if (NO_R6EMU)
-			goto sigill_r6;
+			goto sigill_r2r6;
 	case bne_op:
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt]) {
@@ -601,7 +598,7 @@
 
 	case blezl_op: /* not really i_format */
 		if (!insn.i_format.rt && NO_R6EMU)
-			goto sigill_r6;
+			goto sigill_r2r6;
 	case blez_op:
 		/*
 		 * Compact branches for R6 for the
@@ -636,7 +633,7 @@
 
 	case bgtzl_op:
 		if (!insn.i_format.rt && NO_R6EMU)
-			goto sigill_r6;
+			goto sigill_r2r6;
 	case bgtz_op:
 		/*
 		 * Compact branches for R6 for the
@@ -774,35 +771,27 @@
 #else
 	case bc6_op:
 		/* Only valid for MIPS R6 */
-		if (!cpu_has_mips_r6) {
-			ret = -SIGILL;
-			break;
-		}
+		if (!cpu_has_mips_r6)
+			goto sigill_r6;
 		regs->cp0_epc += 8;
 		break;
 	case balc6_op:
-		if (!cpu_has_mips_r6) {
-			ret = -SIGILL;
-			break;
-		}
+		if (!cpu_has_mips_r6)
+			goto sigill_r6;
 		/* Compact branch: BALC */
 		regs->regs[31] = epc + 4;
 		epc += 4 + (insn.i_format.simmediate << 2);
 		regs->cp0_epc = epc;
 		break;
 	case pop66_op:
-		if (!cpu_has_mips_r6) {
-			ret = -SIGILL;
-			break;
-		}
+		if (!cpu_has_mips_r6)
+			goto sigill_r6;
 		/* Compact branch: BEQZC || JIC */
 		regs->cp0_epc += 8;
 		break;
 	case pop76_op:
-		if (!cpu_has_mips_r6) {
-			ret = -SIGILL;
-			break;
-		}
+		if (!cpu_has_mips_r6)
+			goto sigill_r6;
 		/* Compact branch: BNEZC || JIALC */
 		if (!insn.i_format.rs) {
 			/* JIALC: set $31/ra */
@@ -814,10 +803,8 @@
 	case pop10_op:
 	case pop30_op:
 		/* Only valid for MIPS R6 */
-		if (!cpu_has_mips_r6) {
-			ret = -SIGILL;
-			break;
-		}
+		if (!cpu_has_mips_r6)
+			goto sigill_r6;
 		/*
 		 * Compact branches:
 		 * bovc, beqc, beqzalc, bnvc, bnec, bnezlac
@@ -831,11 +818,17 @@
 	return ret;
 
 sigill_dsp:
-	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
-	force_sig(SIGBUS, current);
+	pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+		current->comm);
+	force_sig(SIGILL, current);
+	return -EFAULT;
+sigill_r2r6:
+	pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+		current->comm);
+	force_sig(SIGILL, current);
 	return -EFAULT;
 sigill_r6:
-	pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+	pr_info("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n",
 		current->comm);
 	force_sig(SIGILL, current);
 	return -EFAULT;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 4eff2ae..4c01ee5 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -83,7 +83,7 @@
 	}
 
 	seq_printf(m, "isa\t\t\t:"); 
-	if (cpu_has_mips_r1)
+	if (cpu_has_mips_1)
 		seq_printf(m, " mips1");
 	if (cpu_has_mips_2)
 		seq_printf(m, "%s", " mips2");
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index bf83dc1..3de0260 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -924,7 +924,7 @@
 	audit_syscall_exit(regs);
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-		trace_sys_exit(regs, regs->regs[2]);
+		trace_sys_exit(regs, regs_return_value(regs));
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c29d397..e6be1f62 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -371,7 +371,7 @@
 	PTR	sys_writev
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
-	PTR	sys_sysmips
+	PTR	__sys_sysmips
 	PTR	sys_ni_syscall			/* 4150 */
 	PTR	sys_getsid
 	PTR	sys_fdatasync
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 0687f96..aa27daf 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -311,7 +311,7 @@
 	PTR	sys_sched_getaffinity
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
-	PTR	sys_sysmips
+	PTR	__sys_sysmips
 	PTR	sys_io_setup			/* 5200 */
 	PTR	sys_io_destroy
 	PTR	sys_io_getevents
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0331ba3..37f608f 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -302,7 +302,7 @@
 	PTR	compat_sys_sched_getaffinity
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
-	PTR	sys_sysmips
+	PTR	__sys_sysmips
 	PTR	compat_sys_io_setup			/* 6200 */
 	PTR	sys_io_destroy
 	PTR	compat_sys_io_getevents
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 5a47042..7913a5c 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -371,7 +371,7 @@
 	PTR	compat_sys_writev
 	PTR	sys_cacheflush
 	PTR	sys_cachectl
-	PTR	sys_sysmips
+	PTR	__sys_sysmips
 	PTR	sys_ni_syscall			/* 4150 */
 	PTR	sys_getsid
 	PTR	sys_fdatasync
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 53a7ef9..4234b2d 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -28,6 +28,7 @@
 #include <linux/elf.h>
 
 #include <asm/asm.h>
+#include <asm/asm-eva.h>
 #include <asm/branch.h>
 #include <asm/cachectl.h>
 #include <asm/cacheflush.h>
@@ -138,10 +139,12 @@
 		__asm__ __volatile__ (
 		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"	li	%[err], 0				\n"
-		"1:	ll	%[old], (%[addr])			\n"
+		"1:							\n"
+		user_ll("%[old]", "(%[addr])")
 		"	move	%[tmp], %[new]				\n"
-		"2:	sc	%[tmp], (%[addr])			\n"
-		"	bnez	%[tmp], 4f				\n"
+		"2:							\n"
+		user_sc("%[tmp]", "(%[addr])")
+		"	beqz	%[tmp], 4f				\n"
 		"3:							\n"
 		"	.insn						\n"
 		"	.subsection 2					\n"
@@ -199,6 +202,12 @@
 	unreachable();
 }
 
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
 SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
 {
 	switch (cmd) {
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index f8b7bf8..e9385bc 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2522,6 +2522,35 @@
 	return 0;
 }
 
+/*
+ * Emulate FPU instructions.
+ *
+ * If we use FPU hardware, then we have been typically called to handle
+ * an unimplemented operation, such as where an operand is a NaN or
+ * denormalized.  In that case exit the emulation loop after a single
+ * iteration so as to let hardware execute any subsequent instructions.
+ *
+ * If we have no FPU hardware or it has been disabled, then continue
+ * emulating floating-point instructions until one of these conditions
+ * has occurred:
+ *
+ * - a non-FPU instruction has been encountered,
+ *
+ * - an attempt to emulate has ended with a signal,
+ *
+ * - the ISA mode has been switched.
+ *
+ * We need to terminate the emulation loop if we got switched to the
+ * MIPS16 mode, whether supported or not, so that we do not attempt
+ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
+ * Similarly if we got switched to the microMIPS mode and only the
+ * regular MIPS mode is supported, so that we do not attempt to emulate
+ * a microMIPS instruction as a regular MIPS FPU instruction.  Or if
+ * we got switched to the regular MIPS mode and only the microMIPS mode
+ * is supported, so that we do not attempt to emulate a regular MIPS
+ * instruction that should cause an Address Error exception instead.
+ * For simplicity we always terminate upon an ISA mode switch.
+ */
 int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 	int has_fpu, void *__user *fault_addr)
 {
@@ -2607,6 +2636,15 @@
 			break;
 		if (sig)
 			break;
+		/*
+		 * We have to check for the ISA bit explicitly here,
+		 * because `get_isa16_mode' may return 0 if support
+		 * for code compression has been globally disabled,
+		 * or otherwise we may produce the wrong signal or
+		 * even proceed successfully where we must not.
+		 */
+		if ((xcp->cp0_epc ^ prevepc) & 0x1)
+			break;
 
 		cond_resched();
 	} while (xcp->cp0_epc > prevepc);
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index 16e0246..cb7697d 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -20,6 +20,8 @@
 ** flush/purge and allocate "regular" cacheable pages for everything.
 */
 
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
+
 #ifdef CONFIG_PA11
 extern struct dma_map_ops pcxl_dma_ops;
 extern struct dma_map_ops pcx_dma_ops;
@@ -54,12 +56,13 @@
 			break;
 		}
 	}
-	BUG_ON(!dev->platform_data);
 	return dev->platform_data;
 }
-		
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-	
+
+#define GET_IOC(dev) ({					\
+	void *__pdata = parisc_walk_tree(dev);		\
+	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
+})
 
 #ifdef CONFIG_IOMMU_CCIO
 struct parisc_device;
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be257..a812262 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@
 	mtctl(__space_to_prot(context), 8);
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
 {
-
 	if (prev != next) {
 		mtctl(__pa(next->pgd), 25);
 		load_context(next->context);
 	}
 }
 
+static inline void switch_mm(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3cfef1d..8ec2ff8 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
 	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
 	ENTRY_SAME(add_key)
 	ENTRY_SAME(request_key)		/* 265 */
-	ENTRY_SAME(keyctl)
+	ENTRY_COMP(keyctl)
 	ENTRY_SAME(ioprio_set)
 	ENTRY_SAME(ioprio_get)
 	ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 040c48f..b6f3b5e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -366,7 +366,7 @@
 		case 15:	/* Data TLB miss fault/Data page fault */
 			/* send SIGSEGV when outside of vma */
 			if (!vma ||
-			    address < vma->vm_start || address > vma->vm_end) {
+			    address < vma->vm_start || address >= vma->vm_end) {
 				si.si_signo = SIGSEGV;
 				si.si_code = SEGV_MAPERR;
 				break;
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 2b90335..a2cc801 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -560,7 +560,7 @@
  * Atomically increments @v by 1, so long as @v is non-zero.
  * Returns non-zero if @v was non-zero, and zero otherwise.
  */
-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
 {
 	long t1, t2;
 
@@ -579,7 +579,7 @@
 	: "r" (&v->counter)
 	: "cc", "xer", "memory");
 
-	return t1;
+	return t1 != 0;
 }
 
 #endif /* __powerpc64__ */
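
The atomic64_inc_not_zero() change above matters because callers use the
result as a plain int/boolean. A minimal standalone sketch of the
truncation hazard the new "t1 != 0" form avoids (illustrative only, not
part of the patch; assumes an LP64 target where narrowing a long to an
int keeps the low 32 bits):

#include <stdio.h>

int main(void)
{
	long t1 = 0x500000000L;   /* old counter value: non-zero, but low 32 bits are zero */
	int truncated = (int)t1;  /* old behaviour: low 32 bits only -> 0 ("false") */
	int fixed = (t1 != 0);    /* new behaviour: proper boolean -> 1 ("true") */

	printf("truncated=%d fixed=%d\n", truncated, fixed);
	return 0;
}
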
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index ee46ffe..743ad7a 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE	0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(is_32bit_task() ? 0x000400000UL : \
+						   0x100000000UL)
 
 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e7d9eca..ceb168c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1283,7 +1283,7 @@
 				"	.llong 0\n"			\
 				".previous"				\
 			: "=r" (rval) \
-			: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
+			: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
 			rval;})
 #else
 #define mftb()		({unsigned long rval;	\
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 6ca3b90..776c1a1 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -687,8 +687,10 @@
 	case 19:
 		switch ((instr >> 1) & 0x3ff) {
 		case 0:		/* mcrf */
-			rd = (instr >> 21) & 0x1c;
-			ra = (instr >> 16) & 0x1c;
+			rd = 7 - ((instr >> 23) & 0x7);
+			ra = 7 - ((instr >> 18) & 0x7);
+			rd *= 4;
+			ra *= 4;
 			val = (regs->ccr >> ra) & 0xf;
 			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
 			goto instr_done;
@@ -968,6 +970,19 @@
 #endif
 
 		case 19:	/* mfcr */
+			if ((instr >> 20) & 1) {
+				imm = 0xf0000000UL;
+				for (sh = 0; sh < 8; ++sh) {
+					if (instr & (0x80000 >> sh)) {
+						regs->gpr[rd] = regs->ccr & imm;
+						break;
+					}
+					imm >>= 4;
+				}
+
+				goto instr_done;
+			}
+
 			regs->gpr[rd] = regs->ccr;
 			regs->gpr[rd] &= 0xffffffffUL;
 			goto instr_done;
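
The mcrf fix above replaces masks that conflated the 3-bit CR field
numbers with bit shifts into the CR image. A standalone sketch of what
the corrected extraction computes (illustrative only, not part of the
patch; the XL-form encoding of "mcrf 3,5" and the CR field layout are
assumptions taken from the Power ISA):

#include <stdio.h>

int main(void)
{
	unsigned int instr = 0x4d940000;         /* assumed encoding of mcrf 3,5 */
	unsigned int bf  = (instr >> 23) & 0x7;  /* destination CR field: 3 */
	unsigned int bfa = (instr >> 18) & 0x7;  /* source CR field: 5 */
	unsigned int rd  = (7 - bf) * 4;         /* shift of CR3 in the ccr image: 16 */
	unsigned int ra  = (7 - bfa) * 4;        /* shift of CR5 in the ccr image: 8 */

	unsigned int ccr = 0x00000f20;           /* example CR image */
	unsigned int val = (ccr >> ra) & 0xf;    /* contents of CR5 */

	/* Copy CR5 into CR3, as the emulated mcrf does. */
	ccr = (ccr & ~(0xfu << rd)) | (val << rd);
	printf("bf=%u bfa=%u rd=%u ra=%u ccr=0x%08x\n", bf, bfa, rd, ra, ccr);
	return 0;
}
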
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 73bf6e1..a006f82 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -167,9 +167,15 @@
 	mm->context.cop_lockp = NULL;
 #endif /* CONFIG_PPC_ICSWX */
 
-	if (radix_enabled())
-		process_tb[mm->context.id].prtb1 = 0;
-	else
+	if (radix_enabled()) {
+		/*
+		 * Radix doesn't have a valid bit in the process table
+		 * entries. However we know that at least P9 implementation
+		 * will avoid caching an entry with an invalid RTS field,
+		 * and 0 is invalid. So this will do.
+		 */
+		process_tb[mm->context.id].prtb0 = 0;
+	} else
 		subpage_prot_free(mm);
 	destroy_pagetable_page(mm);
 	__destroy_context(mm->context.id);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f2c98f6..a7bb872 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -279,7 +279,7 @@
 				       int ssize, unsigned long inv_flags)
 {
 	unsigned long lpar_rc;
-	unsigned long flags = (newpp & 7) | H_AVPN;
+	unsigned long flags;
 	unsigned long want_v;
 
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
@@ -287,6 +287,11 @@
 	pr_devel("    update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
 		 want_v, slot, flags, psize);
 
+	flags = (newpp & 7) | H_AVPN;
+	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+		/* Move pp0 into bit 8 (IBM 55) */
+		flags |= (newpp & HPTE_R_PP0) >> 55;
+
 	lpar_rc = plpar_pte_protect(flags, slot, want_v);
 
 	if (lpar_rc == H_NOT_FOUND) {
@@ -358,6 +363,10 @@
 	BUG_ON(slot == -1);
 
 	flags = newpp & 7;
+	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+		/* Move pp0 into bit 8 (IBM 55) */
+		flags |= (newpp & HPTE_R_PP0) >> 55;
+
 	lpar_rc = plpar_pte_protect(flags, slot, 0);
 
 	BUG_ON(lpar_rc != H_SUCCESS);
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d..8d665f1 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -158,14 +158,13 @@
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. 64-bit
-   tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_compat_task() ? \
-				(STACK_TOP / 3 * 2) : \
-				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(is_compat_task() ? 0x000400000UL : \
+						    0x100000000UL)
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 6ba0bf9..6bc941b 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -64,6 +64,12 @@
 {
 	unsigned long mask = -1UL;
 
+	/*
+	 * No arguments for this syscall, there's nothing to do.
+	 */
+	if (!n)
+		return;
+
 	BUG_ON(i + n > 6);
 #ifdef CONFIG_COMPAT
 	if (test_tsk_thread_flag(task, TIF_31BIT))
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739..f960a04 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@
 
 static bool avx2_usable(void)
 {
-	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
 		&& boot_cpu_has(X86_FEATURE_BMI1)
 		&& boot_cpu_has(X86_FEATURE_BMI2))
 		return true;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 94aad63..c152db2 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
+						  0x100000000UL)
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 78f3760..b601dda 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
 #define MSR_IA32_TSC_ADJUST             0x0000003b
 #define MSR_IA32_BNDCFGS		0x00000d90
 
+#define MSR_IA32_BNDCFGS_RSVD		0x00000ffc
+
 #define MSR_IA32_XSS			0x00000da0
 
 #define FEATURE_CONTROL_LOCKED				(1<<0)
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index a12a047..8b678af 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -43,6 +43,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/smap.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
@@ -214,10 +215,12 @@
 	__HYPERCALL_DECLS;
 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
+	stac();
 	asm volatile("call *%[call]"
 		     : __HYPERCALL_5PARAM
 		     : [call] "a" (&hypercall_page[call])
 		     : __HYPERCALL_CLOBBER5);
+	clac();
 
 	return (long)__res;
 }
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 931ced8..d3e0d04 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -338,6 +338,14 @@
 	struct mpc_intsrc mp_irq;
 
 	/*
+	 * Check bus_irq boundary.
+	 */
+	if (bus_irq >= NR_IRQS_LEGACY) {
+		pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
+		return;
+	}
+
+	/*
 	 * Convert 'gsi' to 'ioapic.pin'.
 	 */
 	ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7249f15..cf89928 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2116,7 +2116,7 @@
 			int idx;
 			idx = find_irq_entry(apic1, pin1, mp_INT);
 			if (idx != -1 && irq_trigger(idx))
-				unmask_ioapic_irq(irq_get_chip_data(0));
+				unmask_ioapic_irq(irq_get_irq_data(0));
 		}
 		irq_domain_deactivate_irq(irq_data);
 		irq_domain_activate_irq(irq_data);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 35058c2..9368fec 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -144,6 +144,14 @@
 	return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
 
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
 static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04e6bbb..3dc6d80 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2987,7 +2987,8 @@
 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
 			return 1;
 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
@@ -3069,7 +3070,11 @@
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!kvm_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+			return 1;
+		if (is_noncanonical_address(data & PAGE_MASK) ||
+		    (data & MSR_IA32_BNDCFGS_RSVD))
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
@@ -6474,7 +6479,6 @@
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 
 	memcpy(vmx_msr_bitmap_legacy_x2apic,
 			vmx_msr_bitmap_legacy, PAGE_SIZE);
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6d52b94..20fa7c8 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -571,3 +571,35 @@
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+
+/*
+ * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
+ *
+ * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
+ * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
+ * for soft poweroff and suspend-to-RAM.
+ *
+ * As far as we know, this is related to the address space, not to the Root
+ * Port itself.  Attaching the quirk to the Root Port is a convenience, but
+ * it could probably also be a standalone DMI quirk.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=103211
+ */
+static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+
+	if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
+	     !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
+	    pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
+		return;
+
+	res = request_mem_region(0x7fa00000, 0x200000,
+				 "MacBook Pro poweroff workaround");
+	if (res)
+		dev_info(dev, "claimed %s %pR\n", res->name, res);
+	else
+		dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 22ca892..79152db 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -147,7 +147,7 @@
 module_param(ec_storm_threshold, uint, 0644);
 MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
 
-static bool ec_freeze_events __read_mostly = true;
+static bool ec_freeze_events __read_mostly = false;
 module_param(ec_freeze_events, bool, 0644);
 MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
 
@@ -1865,24 +1865,6 @@
 }
 
 #ifdef CONFIG_PM_SLEEP
-static int acpi_ec_suspend_noirq(struct device *dev)
-{
-	struct acpi_ec *ec =
-		acpi_driver_data(to_acpi_device(dev));
-
-	acpi_ec_enter_noirq(ec);
-	return 0;
-}
-
-static int acpi_ec_resume_noirq(struct device *dev)
-{
-	struct acpi_ec *ec =
-		acpi_driver_data(to_acpi_device(dev));
-
-	acpi_ec_leave_noirq(ec);
-	return 0;
-}
-
 static int acpi_ec_suspend(struct device *dev)
 {
 	struct acpi_ec *ec =
@@ -1904,7 +1886,6 @@
 #endif
 
 static const struct dev_pm_ops acpi_ec_pm = {
-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
 	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
 };
 
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 9ef3941..f3bc901 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2945,6 +2945,8 @@
 
 static __init int nfit_init(void)
 {
+	int ret;
+
 	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -2972,8 +2974,14 @@
 		return -ENOMEM;
 
 	nfit_mce_register();
+	ret = acpi_bus_register_driver(&acpi_nfit_driver);
+	if (ret) {
+		nfit_mce_unregister();
+		destroy_workqueue(nfit_wq);
+	}
 
-	return acpi_bus_register_driver(&acpi_nfit_driver);
+	return ret;
 }
 
 static __exit void nfit_exit(void)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b351c85..632c814 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -28,10 +28,10 @@
  *    binder_node_lock() and binder_node_unlock() are
  *    used to acq/rel
  * 3) proc->inner_lock : protects the thread and node lists
- *    (proc->threads, proc->nodes) and all todo lists associated
- *    with the binder_proc (proc->todo, thread->todo,
- *    proc->delivered_death and node->async_todo), as well as
- *    thread->transaction_stack
+ *    (proc->threads, proc->waiting_threads, proc->nodes)
+ *    and all todo lists associated with the binder_proc
+ *    (proc->todo, thread->todo, proc->delivered_death and
+ *    node->async_todo), as well as thread->transaction_stack
  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
  *    are used to acq/rel
  *
@@ -352,10 +352,14 @@
  *                        and by @lock)
  * @has_async_transaction: async transaction to node in progress
  *                        (protected by @lock)
+ * @sched_policy:         minimum scheduling policy for node
+ *                        (invariant after initialized)
  * @accept_fds:           file descriptor operations supported for node
  *                        (invariant after initialized)
  * @min_priority:         minimum scheduling priority
  *                        (invariant after initialized)
+ * @inherit_rt:           inherit RT scheduling policy from caller
+ *                        (invariant after initialized)
  * @async_todo:           list of async work items
  *                        (protected by @proc->inner_lock)
  *
@@ -391,6 +395,8 @@
 		/*
 		 * invariant after initialization
 		 */
+		u8 sched_policy:2;
+		u8 inherit_rt:1;
 		u8 accept_fds:1;
 		u8 min_priority;
 	};
@@ -465,6 +471,22 @@
 };
 
 /**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy:           scheduler policy
+ * @prio:                   [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+	unsigned int sched_policy;
+	int prio;
+};
+
+/**
  * struct binder_proc - binder process bookkeeping
  * @proc_node:            element for binder_procs list
  * @threads:              rbtree of binder_threads in this proc
@@ -476,6 +498,8 @@
  *                        (protected by @outer_lock)
  * @refs_by_node:         rbtree of refs ordered by ref->node
  *                        (protected by @outer_lock)
+ * @waiting_threads:      threads currently waiting for proc work
+ *                        (protected by @inner_lock)
  * @pid                   PID of group_leader of process
  *                        (invariant after initialized)
  * @tsk                   task_struct for group_leader of process
@@ -505,8 +529,6 @@
  *                        (protected by @inner_lock)
  * @requested_threads_started: number binder threads started
  *                        (protected by @inner_lock)
- * @ready_threads:        number of threads waiting for proc work
- *                        (protected by @inner_lock)
  * @tmp_ref:              temporary reference to indicate proc is in use
  *                        (protected by @inner_lock)
  * @default_priority:     default scheduler priority
@@ -527,6 +549,7 @@
 	struct rb_root nodes;
 	struct rb_root refs_by_desc;
 	struct rb_root refs_by_node;
+	struct list_head waiting_threads;
 	int pid;
 	struct task_struct *tsk;
 	struct files_struct *files;
@@ -541,9 +564,8 @@
 	int max_threads;
 	int requested_threads;
 	int requested_threads_started;
-	int ready_threads;
 	int tmp_ref;
-	long default_priority;
+	struct binder_priority default_priority;
 	struct dentry *debugfs_entry;
 	struct binder_alloc alloc;
 	struct binder_context *context;
@@ -557,6 +579,7 @@
 	BINDER_LOOPER_STATE_EXITED      = 0x04,
 	BINDER_LOOPER_STATE_INVALID     = 0x08,
 	BINDER_LOOPER_STATE_WAITING     = 0x10,
+	BINDER_LOOPER_STATE_POLL        = 0x20,
 };
 
 /**
@@ -565,6 +588,8 @@
  *                        (invariant after initialization)
  * @rb_node:              element for proc->threads rbtree
  *                        (protected by @proc->inner_lock)
+ * @waiting_thread_node:  element for @proc->waiting_threads list
+ *                        (protected by @proc->inner_lock)
  * @pid:                  PID for this thread
  *                        (invariant after initialization)
  * @looper:               bitmap of looping state
@@ -588,12 +613,14 @@
  * @is_dead:              thread is dead and awaiting free
  *                        when outstanding transactions are cleaned up
  *                        (protected by @proc->inner_lock)
+ * @task:                 struct task_struct for this thread
  *
  * Bookkeeping structure for binder threads.
  */
 struct binder_thread {
 	struct binder_proc *proc;
 	struct rb_node rb_node;
+	struct list_head waiting_thread_node;
 	int pid;
 	int looper;              /* only modified by this thread */
 	bool looper_need_return; /* can be written by other thread */
@@ -605,6 +632,7 @@
 	struct binder_stats stats;
 	atomic_t tmp_ref;
 	bool is_dead;
+	struct task_struct *task;
 };
 
 struct binder_transaction {
@@ -621,8 +649,9 @@
 	struct binder_buffer *buffer;
 	unsigned int	code;
 	unsigned int	flags;
-	long	priority;
-	long	saved_priority;
+	struct binder_priority	priority;
+	struct binder_priority	saved_priority;
+	bool    set_priority_called;
 	kuid_t	sender_euid;
 	/**
 	 * @lock:  protects @from, @to_proc, and @to_thread
@@ -921,22 +950,271 @@
 	return retval;
 }
 
-static void binder_set_nice(long nice)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+				    bool do_proc_work)
 {
-	long min_nice;
+	return !binder_worklist_empty_ilocked(&thread->todo) ||
+		thread->looper_need_return ||
+		(do_proc_work &&
+		 !binder_worklist_empty_ilocked(&thread->proc->todo));
+}
 
-	if (can_nice(current, nice)) {
-		set_user_nice(current, nice);
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
+{
+	bool has_work;
+
+	binder_inner_proc_lock(thread->proc);
+	has_work = binder_has_work_ilocked(thread, do_proc_work);
+	binder_inner_proc_unlock(thread->proc);
+
+	return has_work;
+}
+
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
+{
+	return !thread->transaction_stack &&
+		binder_worklist_empty_ilocked(&thread->todo) &&
+		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+				   BINDER_LOOPER_STATE_REGISTERED));
+}
+
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+					       bool sync)
+{
+	struct rb_node *n;
+	struct binder_thread *thread;
+
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+		    binder_available_for_proc_work_ilocked(thread)) {
+			if (sync)
+				wake_up_interruptible_sync(&thread->wait);
+			else
+				wake_up_interruptible(&thread->wait);
+		}
+	}
+}
+
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc:	process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return:	If there's a thread currently waiting for process work,
+ *		returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
+{
+	struct binder_thread *thread;
+
+	BUG_ON(!spin_is_locked(&proc->inner_lock));
+	thread = list_first_entry_or_null(&proc->waiting_threads,
+					  struct binder_thread,
+					  waiting_thread_node);
+
+	if (thread)
+		list_del_init(&thread->waiting_thread_node);
+
+	return thread;
+}
+
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc:	process to wake up a thread in
+ * @thread:	specific thread to wake-up (may be NULL)
+ * @sync:	whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake-up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+					 struct binder_thread *thread,
+					 bool sync)
+{
+	BUG_ON(!spin_is_locked(&proc->inner_lock));
+
+	if (thread) {
+		if (sync)
+			wake_up_interruptible_sync(&thread->wait);
+		else
+			wake_up_interruptible(&thread->wait);
 		return;
 	}
-	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
-	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
-		     "%d: nice value %ld not allowed use %ld instead\n",
-		      current->pid, nice, min_nice);
-	set_user_nice(current, min_nice);
-	if (min_nice <= MAX_NICE)
+
+	/* Didn't find a thread waiting for proc work; this can happen
+	 * in two scenarios:
+	 * 1. All threads are busy handling transactions
+	 *    In that case, one of those threads should call back into
+	 *    the kernel driver soon and pick up this work.
+	 * 2. Threads are using the (e)poll interface, in which case
+	 *    they may be blocked on the waitqueue without having been
+	 *    added to waiting_threads. For this case, we just iterate
+	 *    over all threads not handling transaction work, and
+	 *    wake them all up. We wake all because we don't know whether
+	 *    a thread that called into (e)poll is handling non-binder
+	 *    work currently.
+	 */
+	binder_wakeup_poll_threads_ilocked(proc, sync);
+}
+
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
+{
+	struct binder_thread *thread = binder_select_thread_ilocked(proc);
+
+	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
+}
+
+static bool is_rt_policy(int policy)
+{
+	return policy == SCHED_FIFO || policy == SCHED_RR;
+}
+
+static bool is_fair_policy(int policy)
+{
+	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+}
+
+static bool binder_supported_policy(int policy)
+{
+	return is_fair_policy(policy) || is_rt_policy(policy);
+}
+
+static int to_userspace_prio(int policy, int kernel_priority)
+{
+	if (is_fair_policy(policy))
+		return PRIO_TO_NICE(kernel_priority);
+	else
+		return MAX_USER_RT_PRIO - 1 - kernel_priority;
+}
+
+static int to_kernel_prio(int policy, int user_priority)
+{
+	if (is_fair_policy(policy))
+		return NICE_TO_PRIO(user_priority);
+	else
+		return MAX_USER_RT_PRIO - 1 - user_priority;
+}
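
to_userspace_prio() and to_kernel_prio() above translate between the user-visible scales (nice -20..19 for CFS, rtprio 1..99 for RT) and the single kernel scale 0..139 documented in struct binder_priority. The self-contained sketch below spells out the same arithmetic; the constants (MAX_RT_PRIO = 100, default prio 120, MAX_USER_RT_PRIO = 100) are written out as assumptions matching the mainline scheduler headers.

#include <stdio.h>

#define MAX_RT_PRIO		100
#define MAX_USER_RT_PRIO	100
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)	/* 120 */
#define NICE_TO_PRIO(n)		((n) + DEFAULT_PRIO)	/* nice -20..19 -> 100..139 */
#define PRIO_TO_NICE(p)		((p) - DEFAULT_PRIO)

static int to_user(int fair, int kprio)
{
	return fair ? PRIO_TO_NICE(kprio) : MAX_USER_RT_PRIO - 1 - kprio;
}

int main(void)
{
	/* SCHED_NORMAL: kernel prio 120 is nice 0, 139 is nice 19 */
	printf("nice for kprio 120: %d\n", to_user(1, 120));	/* prints 0  */
	/* SCHED_FIFO/RR: kernel prio 0 is rtprio 99 (highest), 98 is rtprio 1 */
	printf("rtprio for kprio 0: %d\n", to_user(0, 0));	/* prints 99 */
	return 0;
}
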
+
+static void binder_do_set_priority(struct task_struct *task,
+				   struct binder_priority desired,
+				   bool verify)
+{
+	int priority; /* user-space prio value */
+	bool has_cap_nice;
+	unsigned int policy = desired.sched_policy;
+
+	if (task->policy == policy && task->normal_prio == desired.prio)
 		return;
-	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+
+	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
+
+	priority = to_userspace_prio(policy, desired.prio);
+
+	if (verify && is_rt_policy(policy) && !has_cap_nice) {
+		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
+
+		if (max_rtprio == 0) {
+			policy = SCHED_NORMAL;
+			priority = MIN_NICE;
+		} else if (priority > max_rtprio) {
+			priority = max_rtprio;
+		}
+	}
+
+	if (verify && is_fair_policy(policy) && !has_cap_nice) {
+		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
+
+		if (min_nice > MAX_NICE) {
+			binder_user_error("%d RLIMIT_NICE not set\n",
+					  task->pid);
+			return;
+		} else if (priority < min_nice) {
+			priority = min_nice;
+		}
+	}
+
+	if (policy != desired.sched_policy ||
+	    to_kernel_prio(policy, priority) != desired.prio)
+		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+			     "%d: priority %d not allowed, using %d instead\n",
+			      task->pid, desired.prio,
+			      to_kernel_prio(policy, priority));
+
+	/* Set the actual priority */
+	if (task->policy != policy || is_rt_policy(policy)) {
+		struct sched_param params;
+
+		params.sched_priority = is_rt_policy(policy) ? priority : 0;
+
+		sched_setscheduler_nocheck(task,
+					   policy | SCHED_RESET_ON_FORK,
+					   &params);
+	}
+	if (is_fair_policy(policy))
+		set_user_nice(task, priority);
+}
+
+static void binder_set_priority(struct task_struct *task,
+				struct binder_priority desired)
+{
+	binder_do_set_priority(task, desired, /* verify = */ true);
+}
+
+static void binder_restore_priority(struct task_struct *task,
+				    struct binder_priority desired)
+{
+	binder_do_set_priority(task, desired, /* verify = */ false);
+}
+
+static void binder_transaction_priority(struct task_struct *task,
+					struct binder_transaction *t,
+					struct binder_priority node_prio,
+					bool inherit_rt)
+{
+	struct binder_priority desired_prio;
+
+	if (t->set_priority_called)
+		return;
+
+	t->set_priority_called = true;
+	t->saved_priority.sched_policy = task->policy;
+	t->saved_priority.prio = task->normal_prio;
+
+	desired_prio.prio = t->priority.prio;
+	desired_prio.sched_policy = t->priority.sched_policy;
+
+	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
+		desired_prio.prio = NICE_TO_PRIO(0);
+		desired_prio.sched_policy = SCHED_NORMAL;
+	}
+
+	if (node_prio.prio < t->priority.prio ||
+	    (node_prio.prio == t->priority.prio &&
+	     node_prio.sched_policy == SCHED_FIFO)) {
+		/*
+		 * In case the minimum priority on the node is
+		 * higher (lower value), use that priority. If
+		 * the priority is the same, but the node uses
+		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
+		 * run unbounded, unlike SCHED_RR.
+		 */
+		desired_prio = node_prio;
+	}
+
+	binder_set_priority(task, desired_prio);
 }
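
In the function above, the node's minimum priority can only raise the effective priority of an incoming transaction (a lower kernel prio value means a higher priority), and SCHED_FIFO is preferred over SCHED_RR on a tie because it is not time-sliced. A minimal sketch of that selection rule, with illustrative numbers only:

/* Minimal sketch of the selection rule above; values are illustrative.
 * Kernel prio scale: 0..99 = RT (lower is higher priority), 100..139 = CFS.
 * SCHED_FIFO == 1 in the Linux ABI.
 */
struct prio { unsigned int sched_policy; int prio; };

static struct prio effective_prio(struct prio txn, struct prio node_min)
{
	if (node_min.prio < txn.prio ||
	    (node_min.prio == txn.prio && node_min.sched_policy == 1))
		return node_min;	/* node demands a higher floor, or FIFO wins a tie */
	return txn;
}

/* Example: caller at SCHED_NORMAL nice 0 (prio 120), node minimum prio 110
 * -> effective_prio() returns 110, so the target thread gets boosted.
 */
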
 
 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
@@ -989,6 +1267,7 @@
 	binder_uintptr_t ptr = fp ? fp->binder : 0;
 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
 	__u32 flags = fp ? fp->flags : 0;
+	s8 priority;
 
 	BUG_ON(!spin_is_locked(&proc->inner_lock));
 	while (*p) {
@@ -1020,8 +1299,12 @@
 	node->ptr = ptr;
 	node->cookie = cookie;
 	node->work.type = BINDER_WORK_NODE;
-	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+	node->sched_policy = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) >>
+		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
+	node->min_priority = to_kernel_prio(node->sched_policy, priority);
 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
 	spin_lock_init(&node->lock);
 	INIT_LIST_HEAD(&node->work.entry);
 	INIT_LIST_HEAD(&node->async_todo);
@@ -1140,7 +1423,7 @@
 	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
 		if (list_empty(&node->work.entry)) {
 			binder_enqueue_work_ilocked(&node->work, &proc->todo);
-			wake_up_interruptible(&node->proc->wait);
+			binder_wakeup_proc_ilocked(proc);
 		}
 	} else {
 		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
@@ -2386,6 +2669,80 @@
 	return 0;
 }
 
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t:		transaction to send
+ * @proc:	process to send the transaction to
+ * @thread:	thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the proc
+ * waitqueue.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the waitlist of that specific thread.
+ *
+ * Return:	true if the transaction was successfully queued
+ *		false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+				    struct binder_proc *proc,
+				    struct binder_thread *thread)
+{
+	struct list_head *target_list = NULL;
+	struct binder_node *node = t->buffer->target_node;
+	struct binder_priority node_prio;
+	bool oneway = !!(t->flags & TF_ONE_WAY);
+	bool wakeup = true;
+
+	BUG_ON(!node);
+	binder_node_lock(node);
+	node_prio.prio = node->min_priority;
+	node_prio.sched_policy = node->sched_policy;
+
+	if (oneway) {
+		BUG_ON(thread);
+		if (node->has_async_transaction) {
+			target_list = &node->async_todo;
+			wakeup = false;
+		} else {
+			node->has_async_transaction = 1;
+		}
+	}
+
+	binder_inner_proc_lock(proc);
+
+	if (proc->is_dead || (thread && thread->is_dead)) {
+		binder_inner_proc_unlock(proc);
+		binder_node_unlock(node);
+		return false;
+	}
+
+	if (!thread && !target_list)
+		thread = binder_select_thread_ilocked(proc);
+
+	if (thread) {
+		target_list = &thread->todo;
+		binder_transaction_priority(thread->task, t, node_prio,
+					    node->inherit_rt);
+	} else if (!target_list) {
+		target_list = &proc->todo;
+	} else {
+		BUG_ON(target_list != &node->async_todo);
+	}
+
+	binder_enqueue_work_ilocked(&t->work, target_list);
+
+	if (wakeup)
+		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+	binder_inner_proc_unlock(proc);
+	binder_node_unlock(node);
+
+	return true;
+}
+
 static void binder_transaction(struct binder_proc *proc,
 			       struct binder_thread *thread,
 			       struct binder_transaction_data *tr, int reply,
@@ -2400,8 +2757,6 @@
 	struct binder_proc *target_proc = NULL;
 	struct binder_thread *target_thread = NULL;
 	struct binder_node *target_node = NULL;
-	struct list_head *target_list;
-	wait_queue_head_t *target_wait;
 	struct binder_transaction *in_reply_to = NULL;
 	struct binder_transaction_log_entry *e;
 	uint32_t return_error = 0;
@@ -2452,7 +2807,6 @@
 		}
 		thread->transaction_stack = in_reply_to->to_parent;
 		binder_inner_proc_unlock(proc);
-		binder_set_nice(in_reply_to->saved_priority);
 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
 		if (target_thread == NULL) {
 			return_error = BR_DEAD_REPLY;
@@ -2571,14 +2925,8 @@
 		}
 		binder_inner_proc_unlock(proc);
 	}
-	if (target_thread) {
+	if (target_thread)
 		e->to_thread = target_thread->pid;
-		target_list = &target_thread->todo;
-		target_wait = &target_thread->wait;
-	} else {
-		target_list = &target_proc->todo;
-		target_wait = &target_proc->wait;
-	}
 	e->to_proc = target_proc->pid;
 
 	/* TODO: reuse incoming transaction for reply */
@@ -2631,7 +2979,15 @@
 	t->to_thread = target_thread;
 	t->code = tr->code;
 	t->flags = tr->flags;
-	t->priority = task_nice(current);
+	if (!(t->flags & TF_ONE_WAY) &&
+	    binder_supported_policy(current->policy)) {
+		/* Inherit supported policies for synchronous transactions */
+		t->priority.sched_policy = current->policy;
+		t->priority.prio = current->normal_prio;
+	} else {
+		/* Otherwise, fall back to the default priority */
+		t->priority = target_proc->default_priority;
+	}
 
 	trace_binder_transaction(reply, t, target_node);
 
@@ -2857,8 +3213,10 @@
 		}
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
-		binder_enqueue_work_ilocked(&t->work, target_list);
+		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
 		binder_inner_proc_unlock(target_proc);
+		wake_up_interruptible_sync(&target_thread->wait);
+		binder_restore_priority(current, in_reply_to->saved_priority);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
@@ -2867,47 +3225,17 @@
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
 		binder_inner_proc_unlock(proc);
-		binder_inner_proc_lock(target_proc);
-		if (target_proc->is_dead ||
-				(target_thread && target_thread->is_dead)) {
-			binder_inner_proc_unlock(target_proc);
+		if (!binder_proc_transaction(t, target_proc, target_thread)) {
 			binder_inner_proc_lock(proc);
 			binder_pop_transaction_ilocked(thread, t);
 			binder_inner_proc_unlock(proc);
 			goto err_dead_proc_or_thread;
 		}
-		binder_enqueue_work_ilocked(&t->work, target_list);
-		binder_inner_proc_unlock(target_proc);
 	} else {
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
-		binder_node_lock(target_node);
-		if (target_node->has_async_transaction) {
-			target_list = &target_node->async_todo;
-			target_wait = NULL;
-		} else
-			target_node->has_async_transaction = 1;
-		/*
-		 * Test/set of has_async_transaction
-		 * must be atomic with enqueue on
-		 * async_todo
-		 */
-		binder_inner_proc_lock(target_proc);
-		if (target_proc->is_dead ||
-				(target_thread && target_thread->is_dead)) {
-			binder_inner_proc_unlock(target_proc);
-			binder_node_unlock(target_node);
+		if (!binder_proc_transaction(t, target_proc, NULL))
 			goto err_dead_proc_or_thread;
-		}
-		binder_enqueue_work_ilocked(&t->work, target_list);
-		binder_inner_proc_unlock(target_proc);
-		binder_node_unlock(target_node);
-	}
-	if (target_wait) {
-		if (reply || !(tr->flags & TF_ONE_WAY))
-			wake_up_interruptible_sync(target_wait);
-		else
-			wake_up_interruptible(target_wait);
 	}
 	if (target_thread)
 		binder_thread_dec_tmpref(target_thread);
@@ -2977,6 +3305,7 @@
 
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
+		binder_restore_priority(current, in_reply_to->saved_priority);
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
 		binder_enqueue_work(thread->proc,
 				    &thread->return_error.work,
@@ -3347,12 +3676,13 @@
 							&ref->death->work,
 							&thread->todo);
 					else {
-						binder_enqueue_work(
-							proc,
+						binder_inner_proc_lock(proc);
+						binder_enqueue_work_ilocked(
 							&ref->death->work,
 							&proc->todo);
-						wake_up_interruptible(
-								&proc->wait);
+						binder_wakeup_proc_ilocked(
+							proc);
+						binder_inner_proc_unlock(proc);
 					}
 				}
 			} else {
@@ -3387,8 +3717,8 @@
 						binder_enqueue_work_ilocked(
 								&death->work,
 								&proc->todo);
-						wake_up_interruptible(
-								&proc->wait);
+						binder_wakeup_proc_ilocked(
+								proc);
 					}
 				} else {
 					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
@@ -3443,7 +3773,7 @@
 					binder_enqueue_work_ilocked(
 							&death->work,
 							&proc->todo);
-					wake_up_interruptible(&proc->wait);
+					binder_wakeup_proc_ilocked(proc);
 				}
 			}
 			binder_inner_proc_unlock(proc);
@@ -3470,13 +3800,6 @@
 	}
 }
 
-static int binder_has_proc_work(struct binder_proc *proc,
-				struct binder_thread *thread)
-{
-	return !binder_worklist_empty(proc, &proc->todo) ||
-		thread->looper_need_return;
-}
-
 static int binder_has_thread_work(struct binder_thread *thread)
 {
 	return !binder_worklist_empty(thread->proc, &thread->todo) ||
@@ -3514,6 +3837,38 @@
 	return 0;
 }
 
+static int binder_wait_for_work(struct binder_thread *thread,
+				bool do_proc_work)
+{
+	DEFINE_WAIT(wait);
+	struct binder_proc *proc = thread->proc;
+	int ret = 0;
+
+	freezer_do_not_count();
+	binder_inner_proc_lock(proc);
+	for (;;) {
+		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+		if (binder_has_work_ilocked(thread, do_proc_work))
+			break;
+		if (do_proc_work)
+			list_add(&thread->waiting_thread_node,
+				 &proc->waiting_threads);
+		binder_inner_proc_unlock(proc);
+		schedule();
+		binder_inner_proc_lock(proc);
+		list_del_init(&thread->waiting_thread_node);
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	finish_wait(&thread->wait, &wait);
+	binder_inner_proc_unlock(proc);
+	freezer_count();
+
+	return ret;
+}
+
 static int binder_thread_read(struct binder_proc *proc,
 			      struct binder_thread *thread,
 			      binder_uintptr_t binder_buffer, size_t size,
@@ -3534,10 +3889,7 @@
 
 retry:
 	binder_inner_proc_lock(proc);
-	wait_for_proc_work = thread->transaction_stack == NULL &&
-		binder_worklist_empty_ilocked(&thread->todo);
-	if (wait_for_proc_work)
-		proc->ready_threads++;
+	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
 	binder_inner_proc_unlock(proc);
 
 	thread->looper |= BINDER_LOOPER_STATE_WAITING;
@@ -3553,24 +3905,16 @@
 			wait_event_interruptible(binder_user_error_wait,
 						 binder_stop_on_user_error < 2);
 		}
-		binder_set_nice(proc->default_priority);
-		if (non_block) {
-			if (!binder_has_proc_work(proc, thread))
-				ret = -EAGAIN;
-		} else
-			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
-	} else {
-		if (non_block) {
-			if (!binder_has_thread_work(thread))
-				ret = -EAGAIN;
-		} else
-			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
+		binder_restore_priority(current, proc->default_priority);
 	}
 
-	binder_inner_proc_lock(proc);
-	if (wait_for_proc_work)
-		proc->ready_threads--;
-	binder_inner_proc_unlock(proc);
+	if (non_block) {
+		if (!binder_has_work(thread, wait_for_proc_work))
+			ret = -EAGAIN;
+	} else {
+		ret = binder_wait_for_work(thread, wait_for_proc_work);
+	}
+
 	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
 
 	if (ret)
@@ -3773,16 +4117,14 @@
 		BUG_ON(t->buffer == NULL);
 		if (t->buffer->target_node) {
 			struct binder_node *target_node = t->buffer->target_node;
+			struct binder_priority node_prio;
 
 			tr.target.ptr = target_node->ptr;
 			tr.cookie =  target_node->cookie;
-			t->saved_priority = task_nice(current);
-			if (t->priority < target_node->min_priority &&
-			    !(t->flags & TF_ONE_WAY))
-				binder_set_nice(t->priority);
-			else if (!(t->flags & TF_ONE_WAY) ||
-				 t->saved_priority > target_node->min_priority)
-				binder_set_nice(target_node->min_priority);
+			node_prio.sched_policy = target_node->sched_policy;
+			node_prio.prio = target_node->min_priority;
+			binder_transaction_priority(current, t, node_prio,
+						    target_node->inherit_rt);
 			cmd = BR_TRANSACTION;
 		} else {
 			tr.target.ptr = 0;
@@ -3856,7 +4198,8 @@
 
 	*consumed = ptr - buffer;
 	binder_inner_proc_lock(proc);
-	if (proc->requested_threads + proc->ready_threads == 0 &&
+	if (proc->requested_threads == 0 &&
+	    list_empty(&thread->proc->waiting_threads) &&
 	    proc->requested_threads_started < proc->max_threads &&
 	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
 	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
@@ -3957,6 +4300,8 @@
 	binder_stats_created(BINDER_STAT_THREAD);
 	thread->proc = proc;
 	thread->pid = current->pid;
+	get_task_struct(current);
+	thread->task = current;
 	atomic_set(&thread->tmp_ref, 0);
 	init_waitqueue_head(&thread->wait);
 	INIT_LIST_HEAD(&thread->todo);
@@ -3967,7 +4312,7 @@
 	thread->return_error.cmd = BR_OK;
 	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
 	thread->reply_error.cmd = BR_OK;
-
+	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
 	return thread;
 }
 
@@ -4007,6 +4352,7 @@
 	BUG_ON(!list_empty(&thread->todo));
 	binder_stats_deleted(BINDER_STAT_THREAD);
 	binder_proc_dec_tmpref(thread->proc);
+	put_task_struct(thread->task);
 	kfree(thread);
 }
 
@@ -4080,28 +4426,24 @@
 {
 	struct binder_proc *proc = filp->private_data;
 	struct binder_thread *thread = NULL;
-	int wait_for_proc_work;
+	bool wait_for_proc_work;
 
 	thread = binder_get_thread(proc);
 
 	binder_inner_proc_lock(thread->proc);
-	wait_for_proc_work = thread->transaction_stack == NULL &&
-		binder_worklist_empty_ilocked(&thread->todo);
+	thread->looper |= BINDER_LOOPER_STATE_POLL;
+	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+
 	binder_inner_proc_unlock(thread->proc);
 
-	if (wait_for_proc_work) {
-		if (binder_has_proc_work(proc, thread))
-			return POLLIN;
-		poll_wait(filp, &proc->wait, wait);
-		if (binder_has_proc_work(proc, thread))
-			return POLLIN;
-	} else {
-		if (binder_has_thread_work(thread))
-			return POLLIN;
-		poll_wait(filp, &thread->wait, wait);
-		if (binder_has_thread_work(thread))
-			return POLLIN;
-	}
+	if (binder_has_work(thread, wait_for_proc_work))
+		return POLLIN;
+
+	poll_wait(filp, &thread->wait, wait);
+
+	if (binder_has_thread_work(thread))
+		return POLLIN;
+
 	return 0;
 }
 
@@ -4148,8 +4490,10 @@
 					 &bwr.read_consumed,
 					 filp->f_flags & O_NONBLOCK);
 		trace_binder_read_done(ret);
-		if (!binder_worklist_empty(proc, &proc->todo))
-			wake_up_interruptible(&proc->wait);
+		binder_inner_proc_lock(proc);
+		if (!binder_worklist_empty_ilocked(&proc->todo))
+			binder_wakeup_proc_ilocked(proc);
+		binder_inner_proc_unlock(proc);
 		if (ret < 0) {
 			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
@@ -4216,6 +4560,30 @@
 	return ret;
 }
 
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+				struct binder_node_debug_info *info)
+{
+	struct rb_node *n;
+	binder_uintptr_t ptr = info->ptr;
+
+	memset(info, 0, sizeof(*info));
+
+	binder_inner_proc_lock(proc);
+	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+		struct binder_node *node = rb_entry(n, struct binder_node,
+						    rb_node);
+		if (node->ptr > ptr) {
+			info->ptr = node->ptr;
+			info->cookie = node->cookie;
+			info->has_strong_ref = node->has_strong_ref;
+			info->has_weak_ref = node->has_weak_ref;
+			break;
+		}
+	}
+	binder_inner_proc_unlock(proc);
+
+	return 0;
+}
+
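
binder_ioctl_get_node_debug_info() returns the first node whose ptr is strictly greater than the one passed in, so userspace can enumerate every node of a process by starting at 0 and feeding the returned ptr back in until it comes back as 0. A hypothetical userspace sketch follows; it assumes the matching UAPI additions (struct binder_node_debug_info and the BINDER_GET_NODE_DEBUG_INFO ioctl) from the same patch series and an already-open binder fd, and trims error handling.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>	/* struct binder_node_debug_info, BINDER_GET_NODE_DEBUG_INFO */

static void dump_nodes(int binder_fd)
{
	struct binder_node_debug_info info = { .ptr = 0 };

	do {
		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
			break;
		if (info.ptr)
			printf("node ptr=%llx cookie=%llx strong=%u weak=%u\n",
			       (unsigned long long)info.ptr,
			       (unsigned long long)info.cookie,
			       info.has_strong_ref, info.has_weak_ref);
	} while (info.ptr);		/* ptr == 0 means no more nodes */
}
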
 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int ret;
@@ -4283,6 +4651,24 @@
 		}
 		break;
 	}
+	case BINDER_GET_NODE_DEBUG_INFO: {
+		struct binder_node_debug_info info;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		ret = binder_ioctl_get_node_debug_info(proc, &info);
+		if (ret < 0)
+			goto err;
+
+		if (copy_to_user(ubuf, &info, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		break;
+	}
 	default:
 		ret = -EINVAL;
 		goto err;
@@ -4389,8 +4775,14 @@
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
 	INIT_LIST_HEAD(&proc->todo);
-	init_waitqueue_head(&proc->wait);
-	proc->default_priority = task_nice(current);
+	if (binder_supported_policy(current->policy)) {
+		proc->default_priority.sched_policy = current->policy;
+		proc->default_priority.prio = current->normal_prio;
+	} else {
+		proc->default_priority.sched_policy = SCHED_NORMAL;
+		proc->default_priority.prio = NICE_TO_PRIO(0);
+	}
+
 	binder_dev = container_of(filp->private_data, struct binder_device,
 				  miscdev);
 	proc->context = &binder_dev->context;
@@ -4399,6 +4791,7 @@
 	binder_stats_created(BINDER_STAT_PROC);
 	proc->pid = current->group_leader->pid;
 	INIT_LIST_HEAD(&proc->delivered_death);
+	INIT_LIST_HEAD(&proc->waiting_threads);
 	filp->private_data = proc;
 
 	mutex_lock(&binder_procs_lock);
@@ -4450,7 +4843,6 @@
 		}
 	}
 	binder_inner_proc_unlock(proc);
-	wake_up_interruptible_all(&proc->wait);
 
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 		     "binder_flush: %d woke %d threads\n", proc->pid,
@@ -4519,7 +4911,7 @@
 		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
 		binder_enqueue_work_ilocked(&ref->death->work,
 					    &ref->proc->todo);
-		wake_up_interruptible(&ref->proc->wait);
+		binder_wakeup_proc_ilocked(ref->proc);
 		binder_inner_proc_unlock(ref->proc);
 	}
 
@@ -4683,13 +5075,14 @@
 	spin_lock(&t->lock);
 	to_proc = t->to_proc;
 	seq_printf(m,
-		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
 		   prefix, t->debug_id, t,
 		   t->from ? t->from->proc->pid : 0,
 		   t->from ? t->from->pid : 0,
 		   to_proc ? to_proc->pid : 0,
 		   t->to_thread ? t->to_thread->pid : 0,
-		   t->code, t->flags, t->priority, t->need_reply);
+		   t->code, t->flags, t->priority.sched_policy,
+		   t->priority.prio, t->need_reply);
 	spin_unlock(&t->lock);
 
 	if (proc != to_proc) {
@@ -4812,8 +5205,9 @@
 	hlist_for_each_entry(ref, &node->refs, node_entry)
 		count++;
 
-	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
+	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
+		   node->sched_policy, node->min_priority,
 		   node->has_strong_ref, node->has_weak_ref,
 		   node->local_strong_refs, node->local_weak_refs,
 		   node->internal_strong_refs, count, node->tmp_refs);
@@ -5007,23 +5401,29 @@
 				    struct binder_proc *proc)
 {
 	struct binder_work *w;
+	struct binder_thread *thread;
 	struct rb_node *n;
-	int count, strong, weak;
+	int count, strong, weak, ready_threads;
 	size_t free_async_space =
 		binder_alloc_get_free_async_space(&proc->alloc);
 
 	seq_printf(m, "proc %d\n", proc->pid);
 	seq_printf(m, "context %s\n", proc->context->name);
 	count = 0;
+	ready_threads = 0;
 	binder_inner_proc_lock(proc);
 	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
 		count++;
+
+	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+		ready_threads++;
+
 	seq_printf(m, "  threads: %d\n", count);
 	seq_printf(m, "  requested threads: %d+%d/%d\n"
 			"  ready threads %d\n"
 			"  free async space %zd\n", proc->requested_threads,
 			proc->requested_threads_started, proc->max_threads,
-			proc->ready_threads,
+			ready_threads,
 			free_async_space);
 	count = 0;
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e023066..8c7d0f3 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1029,8 +1029,6 @@
 
 	spin_unlock_irq(&dev->power.lock);
 
-	dev_pm_domain_set(dev, &genpd->domain);
-
 	return gpd_data;
 
  err_free:
@@ -1044,8 +1042,6 @@
 static void genpd_free_dev_data(struct device *dev,
 				struct generic_pm_domain_data *gpd_data)
 {
-	dev_pm_domain_set(dev, NULL);
-
 	spin_lock_irq(&dev->power.lock);
 
 	dev->power.subsys_data->domain_data = NULL;
@@ -1082,6 +1078,8 @@
 	if (ret)
 		goto out;
 
+	dev_pm_domain_set(dev, &genpd->domain);
+
 	genpd->device_count++;
 	genpd->max_off_time_changed = true;
 
@@ -1143,6 +1141,8 @@
 	if (genpd->detach_dev)
 		genpd->detach_dev(genpd, dev);
 
+	dev_pm_domain_set(dev, NULL);
+
 	list_del_init(&pdd->list_node);
 
 	mutex_unlock(&genpd->lock);
@@ -1244,7 +1244,7 @@
 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 			      struct generic_pm_domain *subdomain)
 {
-	struct gpd_link *link;
+	struct gpd_link *l, *link;
 	int ret = -EINVAL;
 
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
@@ -1260,7 +1260,7 @@
 		goto out;
 	}
 
-	list_for_each_entry(link, &genpd->master_links, master_node) {
+	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
 		if (link->slave != subdomain)
 			continue;
 
@@ -1607,12 +1607,12 @@
  */
 void of_genpd_del_provider(struct device_node *np)
 {
-	struct of_genpd_provider *cp;
+	struct of_genpd_provider *cp, *tmp;
 	struct generic_pm_domain *gpd;
 
 	mutex_lock(&gpd_list_lock);
 	mutex_lock(&of_genpd_mutex);
-	list_for_each_entry(cp, &of_genpd_providers, link) {
+	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
 		if (cp->node == np) {
 			/*
 			 * For each PM domain associated with the
@@ -1752,14 +1752,14 @@
  */
 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
 {
-	struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
+	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
 	int ret;
 
 	if (IS_ERR_OR_NULL(np))
 		return ERR_PTR(-EINVAL);
 
 	mutex_lock(&gpd_list_lock);
-	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
 		if (gpd->provider == &np->fwnode) {
 			ret = genpd_remove(gpd);
 			genpd = ret ? ERR_PTR(ret) : gpd;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b4679..39efa7e 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -268,6 +268,8 @@
 			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
 		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
 			value = PM_QOS_LATENCY_ANY;
+		else
+			return -EINVAL;
 	}
 	ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
 	return ret < 0 ? ret : n;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 270cdd4..90c16d8 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -61,6 +61,8 @@
 
 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
 
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
 static struct wakeup_source deleted_ws = {
 	.name = "deleted",
 	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@@ -199,7 +201,7 @@
 	spin_lock_irqsave(&events_lock, flags);
 	list_del_rcu(&ws->entry);
 	spin_unlock_irqrestore(&events_lock, flags);
-	synchronize_rcu();
+	synchronize_srcu(&wakeup_srcu);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
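
The conversion above switches the wakeup-source list from plain RCU to SRCU so that readers which may sleep can still walk the list; the cost is that each reader must carry the index returned by srcu_read_lock() to the matching srcu_read_unlock(), and writers wait with synchronize_srcu(). A minimal sketch of the same pattern on a private list (the names here are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/srcu.h>

/* Illustrative only: mirrors the wakeup_srcu usage with a private list. */
DEFINE_STATIC_SRCU(demo_srcu);
static LIST_HEAD(demo_list);

struct demo_item {
	struct list_head entry;
	const char *name;
};

static void demo_walk(void (*visit)(struct demo_item *))
{
	struct demo_item *it;
	int idx;

	idx = srcu_read_lock(&demo_srcu);	/* read side is allowed to sleep */
	list_for_each_entry_rcu(it, &demo_list, entry)
		visit(it);
	srcu_read_unlock(&demo_srcu, idx);
}

static void demo_remove(struct demo_item *it)
{
	list_del_rcu(&it->entry);
	synchronize_srcu(&demo_srcu);		/* wait for all SRCU readers to finish */
	kfree(it);
}
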
 
@@ -333,12 +335,12 @@
 void device_wakeup_arm_wake_irqs(void)
 {
 	struct wakeup_source *ws;
+	int srcuidx;
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
 		dev_pm_arm_wake_irq(ws->wakeirq);
-
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 
 /**
@@ -349,12 +351,12 @@
 void device_wakeup_disarm_wake_irqs(void)
 {
 	struct wakeup_source *ws;
+	int srcuidx;
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
 		dev_pm_disarm_wake_irq(ws->wakeirq);
-
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 
 /**
@@ -837,10 +839,10 @@
 void pm_print_active_wakeup_sources(void)
 {
 	struct wakeup_source *ws;
-	int active = 0;
+	int srcuidx, active = 0;
 	struct wakeup_source *last_activity_ws = NULL;
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
 		if (ws->active) {
 			pr_info("active wakeup source: %s\n", ws->name);
@@ -856,7 +858,7 @@
 	if (!active && last_activity_ws)
 		pr_info("last active wakeup source: %s\n",
 			last_activity_ws->name);
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
 
@@ -983,8 +985,9 @@
 {
 	struct wakeup_source *ws;
 	ktime_t now = ktime_get();
+	int srcuidx;
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
 		spin_lock_irq(&ws->lock);
 		if (ws->autosleep_enabled != set) {
@@ -998,7 +1001,7 @@
 		}
 		spin_unlock_irq(&ws->lock);
 	}
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
 #endif /* CONFIG_PM_AUTOSLEEP */
 
@@ -1059,15 +1062,16 @@
 static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
 {
 	struct wakeup_source *ws;
+	int srcuidx;
 
 	seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
 		"last_change\tprevent_suspend_time\n");
 
-	rcu_read_lock();
+	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
 		print_wakeup_source_stats(m, ws);
-	rcu_read_unlock();
+	srcu_read_unlock(&wakeup_srcu, srcuidx);
 
 	print_wakeup_source_stats(m, &deleted_ws);
 
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 7bc263c..031ba29 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1399,6 +1399,14 @@
 
 	if (fl->profile)
 		getnstimeofday(&invoket);
+
+	VERIFY(err, fl->sctx != NULL);
+	if (err)
+		goto bail;
+	VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+	if (err)
+		goto bail;
+
 	if (!kernel) {
 		VERIFY(err, 0 == context_restore_interrupted(fl, inv,
 								&ctx));
@@ -2172,6 +2180,9 @@
 		kref_init(&me->channel[cid].kref);
 		pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
 						MAJOR(me->dev_no), cid);
+		err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
+		if (err)
+			pr_info("adsprpc: initial intent failed for %d\n", cid);
 		if (me->channel[cid].ssrcount !=
 				 me->channel[cid].prevssrcount) {
 			me->channel[cid].prevssrcount =
@@ -2235,6 +2246,9 @@
 		if (err)
 			goto bail;
 	}
+	VERIFY(err, fl->sctx != NULL);
+	if (err)
+		goto bail;
 	*info = (fl->sctx->smmu.enabled ? 1 : 0);
 bail:
 	return err;
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index e3fe064..0441451 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -100,7 +100,7 @@
 #define VERIFY(err, val) \
 do {\
 	VERIFY_IPRINTF(__FILE_LINE__"info: calling: " #val "\n");\
-	if (0 == (val)) {\
+	if ((val) == 0) {\
 		(err) = (err) == 0 ? -1 : (err);\
 		VERIFY_EPRINTF(__FILE_LINE__"error: %d: " #val "\n", (err));\
 	} else {\
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 26e91f9..8051d5d 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -580,6 +580,7 @@
 	unsigned char *buf_feature_mask_update;
 	uint8_t hdlc_disabled;
 	struct mutex hdlc_disable_mutex;
+	struct mutex hdlc_recovery_mutex;
 	struct timer_list hdlc_reset_timer;
 	struct mutex diag_hdlc_mutex;
 	unsigned char *hdlc_buf;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 5b507df..e4397c5 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -975,14 +975,34 @@
 	else
 		hdlc_disabled = driver->hdlc_disabled;
 	if (hdlc_disabled) {
+		if (len < 4) {
+			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt\n",
+			__func__, len);
+			return -EBADMSG;
+		}
 		payload = *(uint16_t *)(buf + 2);
+		if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+			pr_err("diag: Dropping packet, payload size is %d\n",
+				payload);
+			return -EBADMSG;
+		}
 		driver->hdlc_encode_buf_len = payload;
 		/*
-		 * Adding 4 bytes for start (1 byte), version (1 byte) and
-		 * payload (2 bytes)
+		 * Adding 5 bytes for start (1 byte), version (1 byte),
+		 * payload (2 bytes) and end (1 byte)
 		 */
-		memcpy(driver->hdlc_encode_buf, buf + 4, payload);
-		goto send_data;
+		if (len == (payload + 5)) {
+			/*
+			 * Adding 4 bytes for start (1 byte), version (1 byte)
+			 * and payload (2 bytes)
+			 */
+			memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+			goto send_data;
+		} else {
+			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt\n",
+			__func__, len);
+			return -EBADMSG;
+		}
 	}
 
 	if (hdlc_flag) {
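
The added checks only accept a non-HDLC buffer whose 16-bit payload length fits the encode buffer and whose total length is exactly start (1) + version (1) + length (2) + payload + end (1) bytes. The sketch below restates that framing rule in isolation; the 0x7e terminator value and the 8 KB payload cap are assumptions standing in for CONTROL_CHAR and DIAG_MAX_HDLC_BUF_SIZE.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NON_HDLC_HEADER_LEN	4	/* start + version + 2-byte payload length */
#define NON_HDLC_OVERHEAD	5	/* header plus the 1-byte end marker */
#define END_MARKER_ASSUMED	0x7e	/* assumed value of CONTROL_CHAR */
#define MAX_PAYLOAD_ASSUMED	8192	/* stand-in for DIAG_MAX_HDLC_BUF_SIZE */

static bool non_hdlc_frame_ok(const uint8_t *buf, int len)
{
	uint16_t payload;

	if (len < NON_HDLC_HEADER_LEN)
		return false;				/* too short to carry a length field */
	memcpy(&payload, buf + 2, sizeof(payload));	/* length is little-endian on the target */
	if (payload > MAX_PAYLOAD_ASSUMED)
		return false;				/* would overflow the encode buffer */
	return len == payload + NON_HDLC_OVERHEAD &&
	       buf[len - 1] == END_MARKER_ASSUMED;	/* exact framing, ending in the marker */
}
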
@@ -3468,6 +3488,7 @@
 	mutex_init(&driver->delayed_rsp_mutex);
 	mutex_init(&apps_data_mutex);
 	mutex_init(&driver->msg_mask_lock);
+	mutex_init(&driver->hdlc_recovery_mutex);
 	for (i = 0; i < NUM_PERIPHERALS; i++)
 		mutex_init(&driver->diagfwd_channel_mutex[i]);
 	init_waitqueue_head(&driver->wait_q);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index cd49f00..3f00a7e 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1443,7 +1443,9 @@
 
 	if (start_ptr) {
 		/* Discard any partial packet reads */
+		mutex_lock(&driver->hdlc_recovery_mutex);
 		driver->incoming_pkt.processing = 0;
+		mutex_unlock(&driver->hdlc_recovery_mutex);
 		diag_process_non_hdlc_pkt(start_ptr, len - i, info);
 	}
 }
@@ -1457,18 +1459,24 @@
 	const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
 	struct diag_pkt_frame_t *actual_pkt = NULL;
 	unsigned char *data_ptr = NULL;
-	struct diag_partial_pkt_t *partial_pkt = &driver->incoming_pkt;
+	struct diag_partial_pkt_t *partial_pkt = NULL;
 
-	if (!buf || len <= 0)
+	mutex_lock(&driver->hdlc_recovery_mutex);
+	if (!buf || len <= 0) {
+		mutex_unlock(&driver->hdlc_recovery_mutex);
 		return;
-
-	if (!partial_pkt->processing)
+	}
+	partial_pkt = &driver->incoming_pkt;
+	if (!partial_pkt->processing) {
+		mutex_unlock(&driver->hdlc_recovery_mutex);
 		goto start;
+	}
 
 	if (partial_pkt->remaining > len) {
 		if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
 			pr_err("diag: Invalid length %d, %d received in %s\n",
 			       partial_pkt->read_len, len, __func__);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
 			goto end;
 		}
 		memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
@@ -1482,6 +1490,7 @@
 			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
 			       partial_pkt->read_len,
 			       partial_pkt->remaining, __func__);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
 			goto end;
 		}
 		memcpy(partial_pkt->data + partial_pkt->read_len, buf,
@@ -1495,20 +1504,27 @@
 	if (partial_pkt->remaining == 0) {
 		actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
 		data_ptr = partial_pkt->data + header_len;
-		if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+		if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+						CONTROL_CHAR) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
 			diag_hdlc_start_recovery(buf, len, info);
+			mutex_lock(&driver->hdlc_recovery_mutex);
+		}
 		err = diag_process_apps_pkt(data_ptr,
 					    actual_pkt->length, info);
 		if (err) {
 			pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
 			       __func__, err);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
 			goto end;
 		}
 		partial_pkt->read_len = 0;
 		partial_pkt->total_len = 0;
 		partial_pkt->processing = 0;
+		mutex_unlock(&driver->hdlc_recovery_mutex);
 		goto start;
 	}
+	mutex_unlock(&driver->hdlc_recovery_mutex);
 	goto end;
 
 start:
@@ -1521,14 +1537,14 @@
 			diag_send_error_rsp(buf, len);
 			goto end;
 		}
-
+		mutex_lock(&driver->hdlc_recovery_mutex);
 		if (pkt_len + header_len > partial_pkt->capacity) {
 			pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
 			       __func__, pkt_len);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
 			diag_hdlc_start_recovery(buf, len, info);
 			break;
 		}
-
 		if ((pkt_len + header_len) > (len - read_bytes)) {
 			partial_pkt->read_len = len - read_bytes;
 			partial_pkt->total_len = pkt_len + header_len;
@@ -1536,19 +1552,27 @@
 						 partial_pkt->read_len;
 			partial_pkt->processing = 1;
 			memcpy(partial_pkt->data, buf, partial_pkt->read_len);
+			mutex_unlock(&driver->hdlc_recovery_mutex);
 			break;
 		}
 		data_ptr = buf + header_len;
-		if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+		if (*(uint8_t *)(data_ptr + actual_pkt->length) !=
+						CONTROL_CHAR) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
 			diag_hdlc_start_recovery(buf, len, info);
+			mutex_lock(&driver->hdlc_recovery_mutex);
+		}
 		else
 			hdlc_reset = 0;
 		err = diag_process_apps_pkt(data_ptr,
 					    actual_pkt->length, info);
-		if (err)
+		if (err) {
+			mutex_unlock(&driver->hdlc_recovery_mutex);
 			break;
+		}
 		read_bytes += header_len + pkt_len + 1;
 		buf += header_len + pkt_len + 1; /* advance to next pkt */
+		mutex_unlock(&driver->hdlc_recovery_mutex);
 	}
 end:
 	return;
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index c975654..5282e02 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -68,7 +68,6 @@
 
 	driver->feature[peripheral].sent_feature_mask = 0;
 	driver->feature[peripheral].rcvd_feature_mask = 0;
-	flush_workqueue(driver->cntl_wq);
 	reg_dirty |= PERIPHERAL_MASK(peripheral);
 	diag_cmd_remove_reg_by_proc(peripheral);
 	driver->feature[peripheral].stm_support = DISABLE_STM;
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index 6476684..e9683e0 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -375,8 +375,10 @@
 			struct diag_glink_read_work, work);
 	struct diag_glink_info *glink_info = read_work->glink_info;
 
-	if (!glink_info || !glink_info->hdl)
+	if (!glink_info || !glink_info->hdl) {
+		kfree(read_work);
 		return;
+	}
 
 	diagfwd_channel_read_done(glink_info->fwd_ctxt,
 			(unsigned char *)(read_work->ptr_read_done),
@@ -388,6 +390,7 @@
 		"diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n",
 		read_work->ptr_rx_done, (int)read_work->ptr_read_size,
 		glink_info->peripheral, glink_info->type);
+	kfree(read_work);
 }
 
 static void diag_glink_notify_rx(void *hdl, const void *priv,
@@ -411,6 +414,7 @@
 	if (!read_work) {
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 			"diag: Could not allocate read_work\n");
+		glink_rx_done(glink_info->hdl, ptr, true);
 		return;
 	}
 
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 3b8203f..dd5a552 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -897,6 +897,9 @@
 	if (!fwd_info)
 		return -EIO;
 
+	if (fwd_info->type == TYPE_CNTL)
+		flush_workqueue(driver->cntl_wq);
+
 	mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
 	fwd_info->ch_open = 0;
 	if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index fcdd886..172a9dc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3877,6 +3877,9 @@
 	 * because the lower layer is allowed to hold locks while calling
 	 * message delivery.
 	 */
+
+	rcu_read_lock();
+
 	if (!run_to_completion)
 		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
 	if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -3899,6 +3902,8 @@
 	if (newmsg)
 		intf->handlers->sender(intf->send_info, newmsg);
 
+	rcu_read_unlock();
+
 	handle_new_recv_msgs(intf);
 }
 
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 6958b5c..510fc10 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -762,6 +762,11 @@
 			       result, len, data[2]);
 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+			/*
+			 * Don't abort here, maybe it was a queued
+			 * response to a previous command.
+			 */
+			ipmi_ssif_unlock_cond(ssif_info, flags);
 			pr_warn(PFX "Invalid response getting flags: %x %x\n",
 				data[0], data[1]);
 		} else {
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index bb7c862..3e13186 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -283,10 +283,14 @@
 			       const struct clk_div_table *table, u8 width,
 			       unsigned long flags)
 {
+	struct clk_hw *parent = clk_hw_get_parent(hw);
 	int i, bestdiv = 0;
 	unsigned long parent_rate, best = 0, now, maxdiv;
 	unsigned long parent_rate_saved = *best_parent_rate;
 
+	if (!parent)
+		return -EINVAL;
+
 	if (!rate)
 		rate = 1;
 
@@ -317,8 +321,7 @@
 			*best_parent_rate = parent_rate_saved;
 			return i;
 		}
-		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
-					       rate * i);
+		parent_rate = clk_hw_round_rate(parent, rate * i);
 		now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
 		if (_is_best_div(rate, now, best, flags)) {
 			bestdiv = i;
@@ -329,7 +332,7 @@
 
 	if (!bestdiv) {
 		bestdiv = _get_maxdiv(table, width, flags);
-		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
+		*best_parent_rate = clk_hw_round_rate(parent, 1);
 	}
 
 	return bestdiv;
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 46ced08..fa0ca36 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -287,20 +287,30 @@
 
 static int clk_branch2_prepare(struct clk_hw *hw)
 {
-	struct clk_branch *branch = to_clk_branch(hw);
-	struct clk_hw *parent = clk_hw_get_parent(hw);
-	unsigned long curr_rate, branch_rate = branch->rate;
+	struct clk_branch *branch;
+	struct clk_hw *parent;
+	unsigned long curr_rate;
 	int ret = 0;
 
+	if (!hw)
+		return -EINVAL;
+
+	branch = to_clk_branch(hw);
+	parent = clk_hw_get_parent(hw);
+	if (!branch)
+		return -EINVAL;
+
 	/*
 	 * Do the rate aggregation and scaling of the RCG in the prepare/
 	 * unprepare functions to avoid potential RPM(/h) communication due to
 	 * votes on the voltage rails.
 	 */
 	if (branch->aggr_sibling_rates) {
+		if (!parent)
+			return -EINVAL;
 		curr_rate = clk_aggregate_rate(hw, parent->core);
-		if (branch_rate > curr_rate) {
-			ret = clk_set_rate(parent->clk, branch_rate);
+		if (branch->rate > curr_rate) {
+			ret = clk_set_rate(parent->clk, branch->rate);
 			if (ret)
 				goto exit;
 		}
@@ -316,13 +326,23 @@
 
 static void clk_branch2_unprepare(struct clk_hw *hw)
 {
-	struct clk_branch *branch = to_clk_branch(hw);
-	struct clk_hw *parent = clk_hw_get_parent(hw);
-	unsigned long curr_rate, new_rate, branch_rate = branch->rate;
+	struct clk_branch *branch;
+	struct clk_hw *parent;
+	unsigned long curr_rate, new_rate;
+
+	if (!hw)
+		return;
+
+	branch = to_clk_branch(hw);
+	parent = clk_hw_get_parent(hw);
+	if (!branch)
+		return;
 
 	if (branch->aggr_sibling_rates) {
+		if (!parent)
+			return;
 		new_rate = clk_aggregate_rate(hw, parent->core);
-		curr_rate = max(new_rate, branch_rate);
+		curr_rate = max(new_rate, branch->rate);
 		if (new_rate < curr_rate)
 			if (clk_set_rate(parent->clk, new_rate))
 				pr_err("Failed to scale %s to %lu\n",
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 1d63a86..1e49722 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -45,6 +45,7 @@
 #include "clk-debug.h"
 
 #define OSM_INIT_RATE			300000000UL
+#define XO_RATE				19200000UL
 #define OSM_TABLE_SIZE			40
 #define SINGLE_CORE			1
 #define MAX_CLUSTER_CNT			3
@@ -450,6 +451,7 @@
 }
 
 static bool is_v2;
+static bool osm_tz_enabled;
 
 static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
 {
@@ -518,6 +520,9 @@
 	int i;
 	unsigned long rrate = 0;
 
+	if (!hw)
+		return -EINVAL;
+
 	/*
 	 * If the rate passed in is 0, return the first frequency in the
 	 * FMAX table.
@@ -541,23 +546,12 @@
 
 static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
 {
-	int quad_core_index, single_core_index = 0;
-	int core_count;
+	int index = 0;
 
-	for (quad_core_index = 0; quad_core_index < entries;
-						quad_core_index++) {
-		core_count = CORE_COUNT_VAL(table[quad_core_index].freq_data);
-		if (rate == table[quad_core_index].frequency &&
-					core_count == SINGLE_CORE) {
-			single_core_index = quad_core_index;
-			continue;
-		}
-		if (rate == table[quad_core_index].frequency &&
-					core_count == MAX_CORE_COUNT)
-			return quad_core_index;
+	for (index = 0; index < entries; index++) {
+		if (rate == table[index].frequency)
+			return index;
 	}
-	if (single_core_index)
-		return single_core_index;
 
 	return -EINVAL;
 }
@@ -639,7 +633,7 @@
 }
 
 
-const struct clk_ops clk_ops_l3_osm = {
+static struct clk_ops clk_ops_l3_osm = {
 	.enable = clk_osm_enable,
 	.round_rate = clk_osm_round_rate,
 	.list_rate = clk_osm_list_rate,
@@ -2104,6 +2098,49 @@
 	return rc;
 }
 
+static int clk_osm_read_lut(struct platform_device *pdev, struct clk_osm *c)
+{
+	u32 data, src, lval, i, j = OSM_TABLE_SIZE;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
+		src = ((data & GENMASK(31, 30)) >> 30);
+		lval = (data & GENMASK(7, 0));
+
+		if (!src)
+			c->osm_table[i].frequency = OSM_INIT_RATE;
+		else
+			c->osm_table[i].frequency = XO_RATE * lval;
+
+		data = clk_osm_read_reg(c, VOLT_REG + i * OSM_REG_SIZE);
+		c->osm_table[i].virtual_corner =
+					((data & GENMASK(21, 16)) >> 16);
+		c->osm_table[i].open_loop_volt = (data & GENMASK(11, 0));
+
+		pr_debug("index=%d freq=%ld virtual_corner=%d open_loop_voltage=%u\n",
+			 i, c->osm_table[i].frequency,
+			 c->osm_table[i].virtual_corner,
+			 c->osm_table[i].open_loop_volt);
+
+		if (i > 0 && j == OSM_TABLE_SIZE && c->osm_table[i].frequency ==
+					c->osm_table[i - 1].frequency)
+			j = i;
+	}
+
+	osm_clks_init[c->cluster_num].rate_max = devm_kcalloc(&pdev->dev,
+						 j, sizeof(unsigned long),
+						       GFP_KERNEL);
+	if (!osm_clks_init[c->cluster_num].rate_max)
+		return -ENOMEM;
+
+	for (i = 0; i < j; i++)
+		osm_clks_init[c->cluster_num].rate_max[i] =
+					c->osm_table[i].frequency;
+
+	c->num_entries = osm_clks_init[c->cluster_num].num_rate_max = j;
+	return 0;
+}
+
 static int clk_osm_parse_acd_dt_configs(struct platform_device *pdev)
 {
 	struct device_node *of = pdev->dev.of_node;
@@ -2579,6 +2616,12 @@
 		return -ENOMEM;
 	}
 
+	/* Check if OSM has already been enabled by trustzone. */
+	if (readl_relaxed(l3_clk.vbases[OSM_BASE] + ENABLE_REG)) {
+		dev_info(&pdev->dev, "OSM has been initialized and enabled by TZ software\n");
+		osm_tz_enabled = true;
+	}
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						"osm_pwrcl_base");
 	if (!res) {
@@ -2612,6 +2655,9 @@
 		return -ENOMEM;
 	}
 
+	if (osm_tz_enabled)
+		return rc;
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l3_pll");
 	if (!res) {
 		dev_err(&pdev->dev,
@@ -3018,77 +3064,6 @@
 
 	clk_data->clk_num = num_clks;
 
-	if (l3_clk.vbases[EFUSE_BASE]) {
-		/* Multiple speed-bins are supported */
-		pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
-		l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
-						    L3_EFUSE_MASK);
-		snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
-			 "qcom,l3-speedbin%d-v%d", l3_clk.speedbin, pvs_ver);
-	}
-
-	dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
-		 l3_clk.speedbin, pvs_ver);
-
-	rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	if (pwrcl_clk.vbases[EFUSE_BASE]) {
-		/* Multiple speed-bins are supported */
-		pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
-		pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
-						    PWRCL_EFUSE_MASK);
-		snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
-			 "qcom,pwrcl-speedbin%d-v%d", pwrcl_clk.speedbin,
-							pvs_ver);
-	}
-
-	dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
-		 pwrcl_clk.speedbin, pvs_ver);
-
-	rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	if (perfcl_clk.vbases[EFUSE_BASE]) {
-		/* Multiple speed-bins are supported */
-		pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
-		perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
-							PERFCL_EFUSE_MASK);
-		snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
-			 "qcom,perfcl-speedbin%d-v%d", perfcl_clk.speedbin,
-							pvs_ver);
-	}
-
-	dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
-		 perfcl_clk.speedbin, pvs_ver);
-
-	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	rc = clk_osm_parse_dt_configs(pdev);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
-		return rc;
-	}
-
-	rc = clk_osm_parse_acd_dt_configs(pdev);
-	if (rc) {
-		dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
-		return rc;
-	}
-
 	rc = clk_osm_resources_init(pdev);
 	if (rc) {
 		if (rc != -EPROBE_DEFER)
@@ -3097,170 +3072,282 @@
 		return rc;
 	}
 
-	rc = clk_osm_acd_resources_init(pdev);
-	if (rc) {
-		dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
-			rc);
-		return rc;
-	}
+	if (!osm_tz_enabled) {
+		if (l3_clk.vbases[EFUSE_BASE]) {
+			/* Multiple speed-bins are supported */
+			pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
+			l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
+							L3_EFUSE_MASK);
+			snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
+			 "qcom,l3-speedbin%d-v%d", l3_clk.speedbin, pvs_ver);
+		}
 
-	rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
-	if (rc) {
-		if (rc == -EPROBE_DEFER)
+		dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
+					l3_clk.speedbin, pvs_ver);
+
+		rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
+				rc);
 			return rc;
-		dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
-			rc);
-		return rc;
-	}
+		}
 
-	rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
-	if (rc) {
-		if (rc == -EPROBE_DEFER)
+		if (pwrcl_clk.vbases[EFUSE_BASE]) {
+			/* Multiple speed-bins are supported */
+			pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+			pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+							PWRCL_EFUSE_MASK);
+			snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+			 "qcom,pwrcl-speedbin%d-v%d", pwrcl_clk.speedbin,
+							pvs_ver);
+		}
+
+		dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+					pwrcl_clk.speedbin, pvs_ver);
+
+		rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+				rc);
 			return rc;
-		dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
-			rc);
-		return rc;
-	}
+		}
 
-	rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
-	if (rc) {
-		if (rc == -EPROBE_DEFER)
+		if (perfcl_clk.vbases[EFUSE_BASE]) {
+			/* Multiple speed-bins are supported */
+			pte_efuse =
+				readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+			perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT)
+						& PERFCL_EFUSE_MASK);
+			snprintf(perfclspeedbinstr,
+				ARRAY_SIZE(perfclspeedbinstr),
+				"qcom,perfcl-speedbin%d-v%d",
+				perfcl_clk.speedbin, pvs_ver);
+		}
+
+		dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+					perfcl_clk.speedbin, pvs_ver);
+
+		rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+				rc);
 			return rc;
-		dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
-			rc);
-		return rc;
-	}
+		}
 
-	rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
-	if (rc)
-		dev_info(&pdev->dev,
-			"No APM crossover corner programmed for L3\n");
+		rc = clk_osm_parse_dt_configs(pdev);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to parse OSM device tree configurations\n");
+			return rc;
+		}
 
-	rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
-	if (rc)
-		dev_info(&pdev->dev,
-			"No APM crossover corner programmed for pwrcl_clk\n");
+		rc = clk_osm_parse_acd_dt_configs(pdev);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to parse ACD device tree configurations\n");
+			return rc;
+		}
 
-	rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
-	if (rc)
-		dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+		rc = clk_osm_acd_resources_init(pdev);
+		if (rc) {
+			dev_err(&pdev->dev, "ACD resources init failed, rc=%d\n",
+				rc);
+			return rc;
+		}
 
-	clk_osm_setup_cycle_counters(&l3_clk);
-	clk_osm_setup_cycle_counters(&pwrcl_clk);
-	clk_osm_setup_cycle_counters(&perfcl_clk);
+		rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
+		if (rc) {
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
+				rc);
+			return rc;
+		}
+		rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+		if (rc) {
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+				rc);
+			return rc;
+		}
+		rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+		if (rc) {
+			if (rc == -EPROBE_DEFER)
+				return rc;
+			dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+				rc);
+			return rc;
+		}
 
-	clk_osm_print_osm_table(&l3_clk);
-	clk_osm_print_osm_table(&pwrcl_clk);
-	clk_osm_print_osm_table(&perfcl_clk);
+		rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
+		if (rc)
+			dev_info(&pdev->dev,
+				"No APM crossover corner programmed for L3\n");
+		rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
+		if (rc)
+			dev_info(&pdev->dev,
+				"No APM crossover corner programmed for pwrcl_clk\n");
+		rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
+		if (rc)
+			dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
 
-	rc = clk_osm_setup_hw_table(&l3_clk);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
-		goto exit;
-	}
-	rc = clk_osm_setup_hw_table(&pwrcl_clk);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
-		goto exit;
-	}
-	rc = clk_osm_setup_hw_table(&perfcl_clk);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
-		goto exit;
-	}
+		clk_osm_setup_cycle_counters(&l3_clk);
+		clk_osm_setup_cycle_counters(&pwrcl_clk);
+		clk_osm_setup_cycle_counters(&perfcl_clk);
 
-	/* Policy tuning */
-	rc = clk_osm_set_cc_policy(pdev);
-	if (rc < 0) {
-		dev_err(&pdev->dev, "cc policy setup failed");
-		goto exit;
-	}
+		clk_osm_print_osm_table(&l3_clk);
+		clk_osm_print_osm_table(&pwrcl_clk);
+		clk_osm_print_osm_table(&perfcl_clk);
 
-	/* LLM Freq Policy Tuning */
-	rc = clk_osm_set_llm_freq_policy(pdev);
-	if (rc < 0) {
-		dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
-		goto exit;
-	}
+		rc = clk_osm_setup_hw_table(&l3_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
+			goto exit;
+		}
+		rc = clk_osm_setup_hw_table(&pwrcl_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+			goto exit;
+		}
+		rc = clk_osm_setup_hw_table(&perfcl_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+			goto exit;
+		}
 
-	/* LLM Voltage Policy Tuning */
-	rc = clk_osm_set_llm_volt_policy(pdev);
-	if (rc < 0) {
-		dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
-		goto exit;
-	}
+		/* Policy tuning */
+		rc = clk_osm_set_cc_policy(pdev);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "cc policy setup failed");
+			goto exit;
+		}
 
-	clk_osm_setup_fsms(&l3_clk);
-	clk_osm_setup_fsms(&pwrcl_clk);
-	clk_osm_setup_fsms(&perfcl_clk);
+		/* LLM Freq Policy Tuning */
+		rc = clk_osm_set_llm_freq_policy(pdev);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
+			goto exit;
+		}
 
-	/* Program VC at which the array power supply needs to be switched */
-	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
+		/* LLM Voltage Policy Tuning */
+		rc = clk_osm_set_llm_volt_policy(pdev);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
+			goto exit;
+		}
+
+		clk_osm_setup_fsms(&l3_clk);
+		clk_osm_setup_fsms(&pwrcl_clk);
+		clk_osm_setup_fsms(&perfcl_clk);
+
+		/*
+		 * Program the VC at which the array power supply
+		 * needs to be switched.
+		 */
+		clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
 				APM_CROSSOVER_VC, OSM_BASE);
-	if (perfcl_clk.secure_init) {
-		clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
-				DATA_MEM(77));
-		clk_osm_write_seq_reg(&perfcl_clk,
+		if (perfcl_clk.secure_init) {
+			clk_osm_write_seq_reg(&perfcl_clk,
+				perfcl_clk.apm_crossover_vc, DATA_MEM(77));
+			clk_osm_write_seq_reg(&perfcl_clk,
 				(0x39 | (perfcl_clk.apm_threshold_vc << 6)),
 				DATA_MEM(111));
-	} else {
-		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
-				perfcl_clk.apm_crossover_vc);
-		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(111),
+		} else {
+			scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
+					perfcl_clk.apm_crossover_vc);
+			scm_io_write(perfcl_clk.pbases[SEQ_BASE] +
+								DATA_MEM(111),
 				(0x39 | (perfcl_clk.apm_threshold_vc << 6)));
-	}
+		}
 
-	/*
-	 * Perform typical secure-world HW initialization
-	 * as necessary.
-	 */
-	clk_osm_do_additional_setup(&l3_clk, pdev);
-	clk_osm_do_additional_setup(&pwrcl_clk, pdev);
-	clk_osm_do_additional_setup(&perfcl_clk, pdev);
+		/*
+		 * Perform typical secure-world HW initialization
+		 * as necessary.
+		 */
+		clk_osm_do_additional_setup(&l3_clk, pdev);
+		clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+		clk_osm_do_additional_setup(&perfcl_clk, pdev);
 
-	/* MEM-ACC Programming */
-	clk_osm_program_mem_acc_regs(&l3_clk);
-	clk_osm_program_mem_acc_regs(&pwrcl_clk);
-	clk_osm_program_mem_acc_regs(&perfcl_clk);
+		/* MEM-ACC Programming */
+		clk_osm_program_mem_acc_regs(&l3_clk);
+		clk_osm_program_mem_acc_regs(&pwrcl_clk);
+		clk_osm_program_mem_acc_regs(&perfcl_clk);
 
-	if (of_property_read_bool(pdev->dev.of_node, "qcom,osm-pll-setup")) {
-		clk_osm_setup_cluster_pll(&l3_clk);
-		clk_osm_setup_cluster_pll(&pwrcl_clk);
-		clk_osm_setup_cluster_pll(&perfcl_clk);
-	}
+		if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,osm-pll-setup")) {
+			clk_osm_setup_cluster_pll(&l3_clk);
+			clk_osm_setup_cluster_pll(&pwrcl_clk);
+			clk_osm_setup_cluster_pll(&perfcl_clk);
+		}
 
-	/* Misc programming */
-	clk_osm_misc_programming(&l3_clk);
-	clk_osm_misc_programming(&pwrcl_clk);
-	clk_osm_misc_programming(&perfcl_clk);
+		/* Misc programming */
+		clk_osm_misc_programming(&l3_clk);
+		clk_osm_misc_programming(&pwrcl_clk);
+		clk_osm_misc_programming(&perfcl_clk);
 
-	pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
+		rc = clk_osm_acd_init(&l3_clk);
+		if (rc) {
+			pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
+			goto exit;
+		}
+		rc = clk_osm_acd_init(&pwrcl_clk);
+		if (rc) {
+			pr_err("failed to initialize ACD for pwrcl, rc=%d\n",
+									rc);
+			goto exit;
+		}
+		rc = clk_osm_acd_init(&perfcl_clk);
+		if (rc) {
+			pr_err("failed to initialize ACD for perfcl, rc=%d\n",
+									rc);
+			goto exit;
+		}
+
+		pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
 			of_property_read_bool(pdev->dev.of_node,
 				"qcom,enable-per-core-dcvs");
-	if (pwrcl_clk.per_core_dcvs) {
+		if (pwrcl_clk.per_core_dcvs) {
+			val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
+			val |= BIT(0);
+			clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL,
+							OSM_BASE);
+			val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
+			val |= BIT(0);
+			clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL,
+							OSM_BASE);
+		}
+	} else {
+		/* OSM has been enabled already by trustzone */
+		rc = clk_osm_read_lut(pdev, &l3_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to read OSM LUT for L3, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = clk_osm_read_lut(pdev, &pwrcl_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to read OSM LUT for power cluster, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		rc = clk_osm_read_lut(pdev, &perfcl_clk);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to read OSM LUT for perf cluster, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Check whether per-core DCVS is enabled */
 		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
-		val |= BIT(0);
-		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
+		if (val & BIT(0))
+			pwrcl_clk.per_core_dcvs = true;
 
 		val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
-		val |= BIT(0);
-		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL, OSM_BASE);
-	}
+		if (val & BIT(0))
+			perfcl_clk.per_core_dcvs = true;
 
-	rc = clk_osm_acd_init(&l3_clk);
-	if (rc) {
-		pr_err("failed to initialize ACD for L3, rc=%d\n", rc);
-		goto exit;
-	}
-	rc = clk_osm_acd_init(&pwrcl_clk);
-	if (rc) {
-		pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc);
-		goto exit;
-	}
-	rc = clk_osm_acd_init(&perfcl_clk);
-	if (rc) {
-		pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc);
-		goto exit;
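+		/* TZ has already enabled the OSM, so drop the L3 enable op. */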
+		clk_ops_l3_osm.enable = NULL;
 	}
 
 	spin_lock_init(&l3_clk.lock);
@@ -3287,7 +3374,23 @@
 
 	get_online_cpus();
 
-	/* Set the L3 clock to run off GPLL0 and enable OSM for the domain */
+	if (!osm_tz_enabled) {
+		populate_debugfs_dir(&l3_clk);
+		populate_debugfs_dir(&pwrcl_clk);
+		populate_debugfs_dir(&perfcl_clk);
+
+		/* Configure default rate to lowest frequency */
+		for (i = 0; i < MAX_CORE_COUNT; i++) {
+			osm_set_index(&pwrcl_clk, 0, i);
+			osm_set_index(&perfcl_clk, 0, i);
+		}
+	}
+	/*
+	 * Set the L3 clock to run off GPLL0 and enable OSM for the domain.
+	 * In the case that trustzone has already enabled OSM, bring the L3
+	 * clock rate to a safe level until the devfreq driver comes up and
+	 * votes for its desired frequency.
+	 */
 	rc = clk_set_rate(l3_clk.hw.clk, OSM_INIT_RATE);
 	if (rc) {
 		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
@@ -3295,21 +3398,12 @@
 		goto provider_err;
 	}
 	WARN(clk_prepare_enable(l3_cluster0_vote_clk.hw.clk),
-		     "clk: Failed to enable cluster0 clock for L3\n");
+			"clk: Failed to enable cluster0 clock for L3\n");
 	WARN(clk_prepare_enable(l3_cluster1_vote_clk.hw.clk),
-		     "clk: Failed to enable cluster1 clock for L3\n");
+			"clk: Failed to enable cluster1 clock for L3\n");
 	udelay(300);
 
-	/* Configure default rate to lowest frequency */
-	for (i = 0; i < MAX_CORE_COUNT; i++) {
-		osm_set_index(&pwrcl_clk, 0, i);
-		osm_set_index(&perfcl_clk, 0, i);
-	}
-
 	populate_opp_table(pdev);
-	populate_debugfs_dir(&l3_clk);
-	populate_debugfs_dir(&pwrcl_clk);
-	populate_debugfs_dir(&perfcl_clk);
 
 	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	register_cpu_cycle_counter_cb(&cb);
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b63c3c3..7382cfa 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -138,9 +138,6 @@
 	int ret;
 	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
 
-	if (rcg->flags & DFS_ENABLE_RCG)
-		return 0;
-
 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
 				 CFG_SRC_SEL_MASK, cfg);
 	if (ret)
@@ -350,8 +347,9 @@
 	struct clk_hw *hw = &rcg->clkr.hw;
 	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
 
+	/* Skip configuration if DFS control has been enabled for the RCG. */
 	if (rcg->flags & DFS_ENABLE_RCG)
-		return -EPERM;
+		return 0;
 
 	if (index < 0)
 		return index;
@@ -481,7 +479,7 @@
 	}
 
 	ret = clk_rcg2_configure(rcg, f);
-	if (ret && ret != -EPERM)
+	if (ret)
 		return ret;
 
 	if (rcg->flags & FORCE_ENABLE_RCG) {
@@ -934,10 +932,11 @@
 EXPORT_SYMBOL_GPL(clk_byte2_ops);
 
 static const struct frac_entry frac_table_pixel[] = {
+	{ 1, 1 },
+	{ 2, 3 },
+	{ 4, 9 },
 	{ 3, 8 },
 	{ 2, 9 },
-	{ 4, 9 },
-	{ 1, 1 },
 	{ }
 };
 
@@ -1028,6 +1027,7 @@
 			unsigned long parent_rate)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	struct clk_hw *parent = clk_hw_get_parent(hw);
 	struct freq_tbl f = { 0 };
 	unsigned long src_rate;
 	unsigned long num, den;
@@ -1035,7 +1035,12 @@
 	u32 hid_div, cfg;
 	int i, num_parents = clk_hw_get_num_parents(hw);
 
-	src_rate = clk_get_rate(clk_hw_get_parent(hw)->clk);
+	if (!parent) {
+		pr_err("RCG parent isn't initialized\n");
+		return -EINVAL;
+	}
+
+	src_rate = clk_get_rate(parent->clk);
 	if (src_rate <= 0) {
 		pr_err("Invalid RCG parent rate\n");
 		return -EINVAL;
@@ -1196,13 +1201,15 @@
 		u32 *mode, u32 *pre_div)
 {
 	struct clk_rcg2 *rcg;
-	int num_parents = clk_hw_get_num_parents(hw);
+	int num_parents;
 	u32 cfg, mask;
 	int i, ret;
 
 	if (!hw)
 		return -EINVAL;
 
+	num_parents = clk_hw_get_num_parents(hw);
+
 	rcg = to_clk_rcg2(hw);
 
 	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + offset, &cfg);
@@ -1348,7 +1355,9 @@
 		"RCG flags %x\n", i, dfs_freq_tbl[i].freq, dfs_freq_tbl[i].src,
 				dfs_freq_tbl[i].pre_div, dfs_freq_tbl[i].m,
 				dfs_freq_tbl[i].n, rcg_flags);
-
+	/* Skip the safe configuration if DFS has been enabled for the RCG. */
+	if (clk->enable_safe_config)
+		clk->enable_safe_config = false;
 	clk->flags |= rcg_flags;
 	clk->freq_tbl = dfs_freq_tbl;
 err:
diff --git a/drivers/clk/qcom/clk-regmap.c b/drivers/clk/qcom/clk-regmap.c
index 1c856d3..aa024c2d 100644
--- a/drivers/clk/qcom/clk-regmap.c
+++ b/drivers/clk/qcom/clk-regmap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -103,9 +103,12 @@
  */
 int devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk)
 {
-	if (dev && dev_get_regmap(dev, NULL))
+	if (!dev || !rclk)
+		return -EINVAL;
+
+	if (dev_get_regmap(dev, NULL))
 		rclk->regmap = dev_get_regmap(dev, NULL);
-	else if (dev && dev->parent)
+	else if (dev->parent)
 		rclk->regmap = dev_get_regmap(dev->parent, NULL);
 
 	return devm_clk_hw_register(dev, &rclk->hw);
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 1450b91..66bf435 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -182,6 +182,22 @@
 	"core_bi_pll_test_se",
 };
 
+static const struct parent_map gcc_parent_map_10[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 1 },
+	{ P_GPLL4_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll4",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
 static struct clk_dummy measure_only_snoc_clk = {
 	.rrate = 1000,
 	.hw.init = &(struct clk_init_data){
@@ -241,6 +257,28 @@
 	},
 };
 
+static struct clk_alpha_pll gpll4 = {
+	.offset = 0x76000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(4),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll4",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_fixed_pll_ops,
+			VDD_CX_FMAX_MAP4(
+				MIN, 615000000,
+				LOW, 1066000000,
+				LOW_L1, 1600000000,
+				NOMINAL, 2000000000),
+		},
+	},
+};
+
 static const struct clk_div_table post_div_table_fabia_even[] = {
 	{ 0x0, 1 },
 	{ 0x1, 2 },
@@ -476,6 +514,7 @@
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
 	F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+	F(38400000, P_GPLL0_OUT_EVEN, 1, 16, 125),
 	F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
 	F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
 	F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
@@ -830,6 +869,17 @@
 	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
 	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
 	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(201500000, P_GPLL4_OUT_MAIN, 4, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src_sdm845_v2[] = {
+	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(9600000, P_BI_TCXO, 2, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
 	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
 	{ }
 };
@@ -838,12 +888,12 @@
 	.cmd_rcgr = 0x1400c,
 	.mnd_width = 8,
 	.hid_width = 5,
-	.parent_map = gcc_parent_map_5,
+	.parent_map = gcc_parent_map_10,
 	.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
 	.enable_safe_config = true,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_sdcc2_apps_clk_src",
-		.parent_names = gcc_parent_names_5,
+		.parent_names = gcc_parent_names_10,
 		.num_parents = 5,
 		.flags = CLK_SET_RATE_PARENT,
 		.ops = &clk_rcg2_ops,
@@ -851,7 +901,7 @@
 			MIN, 9600000,
 			LOWER, 19200000,
 			LOW, 100000000,
-			LOW_L1, 200000000),
+			LOW_L1, 201500000),
 	},
 };
 
@@ -1449,6 +1499,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_camera_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1475,6 +1526,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_camera_xo_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1569,7 +1621,7 @@
 				"gcc_cpuss_ahb_clk_src",
 			},
 			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
+			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1583,6 +1635,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_cpuss_dvm_bus_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1598,6 +1651,7 @@
 		.enable_mask = BIT(22),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_cpuss_gnoc_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1644,6 +1698,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_disp_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1704,6 +1759,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_disp_xo_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -1773,6 +1829,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_gpu_cfg_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -2728,7 +2785,7 @@
 				"gcc_cpuss_ahb_clk_src",
 			},
 			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
+			.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -3501,6 +3558,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_video_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -3527,6 +3585,7 @@
 		.enable_mask = BIT(0),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_video_xo_clk",
+			.flags = CLK_IS_CRITICAL,
 			.ops = &clk_branch2_ops,
 		},
 	},
@@ -3775,6 +3834,7 @@
 	[GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
 	[GPLL0] = &gpll0.clkr,
 	[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+	[GPLL4] = &gpll4.clkr,
 };
 
 static const struct qcom_reset_map gcc_sdm845_resets[] = {
@@ -3952,6 +4012,9 @@
 		50000000;
 	gcc_qupv3_wrap1_s7_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
 		128000000;
+	gcc_sdcc2_apps_clk_src.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src_sdm845_v2;
+	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		200000000;
 	gcc_ufs_card_axi_clk_src.freq_tbl =
 		ftbl_gcc_ufs_card_axi_clk_src_sdm845_v2;
 	gcc_ufs_card_axi_clk_src.clkr.hw.init->rate_max[VDD_CX_HIGH] =
@@ -4030,22 +4093,9 @@
 	regmap_update_bits(regmap, GCC_MMSS_MISC, 0x3, 0x3);
 	regmap_update_bits(regmap, GCC_GPU_MISC, 0x3, 0x3);
 
-	/* Keep these CPUSS clocks enabled always */
-	clk_prepare_enable(gcc_cpuss_ahb_clk.clkr.hw.clk);
-	clk_prepare_enable(gcc_sys_noc_cpuss_ahb_clk.clkr.hw.clk);
-	clk_prepare_enable(gcc_cpuss_dvm_bus_clk.clkr.hw.clk);
-	clk_prepare_enable(gcc_cpuss_gnoc_clk.clkr.hw.clk);
-
-	/* Keep the core XO clock enabled always */
-	clk_prepare_enable(gcc_camera_xo_clk.clkr.hw.clk);
-	clk_prepare_enable(gcc_disp_xo_clk.clkr.hw.clk);
-	clk_prepare_enable(gcc_video_xo_clk.clkr.hw.clk);
-
-	/* Enable for core register access */
-	clk_prepare_enable(gcc_gpu_cfg_ahb_clk.clkr.hw.clk);
-	clk_prepare_enable(gcc_disp_ahb_clk.clkr.hw.clk);
-	clk_prepare_enable(gcc_camera_ahb_clk.clkr.hw.clk);
-	clk_prepare_enable(gcc_video_ahb_clk.clkr.hw.clk);
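+	/*
+	 * The always-on CPUSS, XO and AHB clocks are kept enabled through
+	 * CLK_IS_CRITICAL in their init data rather than by explicit
+	 * clk_prepare_enable() calls here.
+	 */
+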
+	/* Keep this clock on all the time on SDM845 v1 */
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-sdm845"))
+		clk_prepare_enable(gcc_aggre_noc_pcie_tbu_clk.clkr.hw.clk);
 
 	/* DFS clock registration */
 	ret = qcom_cc_register_rcg_dfs(pdev, &gcc_sdm845_dfs_desc);
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 93ad1b0..eb6c658 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -94,7 +94,6 @@
 
 struct dsi_pll_regs {
 	u32 pll_prop_gain_rate;
-	u32 pll_outdiv_rate;
 	u32 pll_lockdet_rate;
 	u32 decimal_div_start;
 	u32 frac_div_start_low;
@@ -134,6 +133,165 @@
 	struct dsi_pll_regs reg_setup;
 };
 
+static inline int pll_reg_read(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = MDSS_PLL_REG_R(rsc->pll_base, reg);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int pll_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(rsc->pll_base, reg, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_read(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(rsc->phy_base, reg, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_update_bits_sub(struct mdss_pll_resources *rsc,
+		unsigned int reg, unsigned int mask, unsigned int val)
+{
+	u32 reg_val;
+	int rc = 0;
+
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	reg_val &= ~mask;
+	reg_val |= (val & mask);
+	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+	return rc;
+}
+
+static inline int phy_reg_update_bits(void *context, unsigned int reg,
+				unsigned int mask, unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = phy_reg_update_bits_sub(rsc, reg, mask, val);
+	if (!rc && rsc->slave)
+		rc = phy_reg_update_bits_sub(rsc->slave, reg, mask, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int pclk_mux_read_sel(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc)
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+	else
+		*val = (MDSS_PLL_REG_R(rsc->pll_base, reg) & 0x3);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+	return rc;
+}
+
+static inline int pclk_mux_write_sel_sub(struct mdss_pll_resources *rsc,
+				unsigned int reg, unsigned int val)
+{
+	u32 reg_val;
+	int rc = 0;
+
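+	/* Update only the 2-bit pclk source select field of the register. */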
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	reg_val &= ~0x03;
+	reg_val |= val;
+
+	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+	return rc;
+}
+
+static inline int pclk_mux_write_sel(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pclk_mux_write_sel_sub(rsc, reg, val);
+	if (!rc && rsc->slave)
+		rc = pclk_mux_write_sel_sub(rsc->slave, reg, val);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
 static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
 static struct dsi_pll_10nm plls[DSI_PLL_MAX];
 
@@ -203,54 +361,14 @@
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
 	struct dsi_pll_regs *regs = &pll->reg_setup;
-	u64 target_freq;
 	u64 fref = rsc->vco_ref_clk_rate;
-	u32 computed_output_div, div_log = 0;
 	u64 pll_freq;
 	u64 divider;
 	u64 dec, dec_multiple;
 	u32 frac;
 	u64 multiplier;
-	u32 i;
 
-	target_freq = rsc->vco_current_rate;
-	pr_debug("target_freq = %llu\n", target_freq);
-
-	if (config->div_override) {
-		computed_output_div = config->output_div;
-
-		/*
-		 * Computed_output_div = 2 ^ div_log
-		 * To get div_log from output div just get the index of the
-		 * 1 bit in the value.
-		 * div_log ranges from 0-3. so check the 4 lsbs
-		 */
-
-		for (i = 0; i < 4; i++) {
-			if (computed_output_div & (1 << i)) {
-				div_log = i;
-				break;
-			}
-		}
-
-	} else {
-		if (target_freq < MHZ_250) {
-			computed_output_div = 8;
-			div_log = 3;
-		} else if (target_freq < MHZ_500) {
-			computed_output_div = 4;
-			div_log = 2;
-		} else if (target_freq < MHZ_1000) {
-			computed_output_div = 2;
-			div_log = 1;
-		} else {
-			computed_output_div = 1;
-			div_log = 0;
-		}
-	}
-	pr_debug("computed_output_div = %d\n", computed_output_div);
-
-	pll_freq = target_freq * computed_output_div;
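+	/*
+	 * The PLL output divider is handled by the separate pll_out_div
+	 * clock, so program the VCO directly at the requested rate.
+	 */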
+	pll_freq = rsc->vco_current_rate;
 
 	if (config->disable_prescaler)
 		divider = fref;
@@ -274,7 +392,6 @@
 	else
 		regs->pll_clock_inverters = 0;
 
-	regs->pll_outdiv_rate = div_log;
 	regs->pll_lockdet_rate = config->lock_timer;
 	regs->decimal_div_start = dec;
 	regs->frac_div_start_low = (frac & 0xff);
@@ -394,7 +511,6 @@
 	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
 		       reg->frac_div_start_high);
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
-	MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
 	MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
 	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
@@ -605,7 +721,9 @@
 	}
 	pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
 	pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
-	pr_debug("cfg0=%d,cfg1=%d\n", pll->cached_cfg0, pll->cached_cfg1);
+	pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
+	pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
+			pll->cached_cfg1, pll->cached_outdiv);
 
 	pll->vco_cached_rate = clk_hw_get_rate(hw);
 	dsi_pll_disable(vco);
@@ -646,6 +764,8 @@
 					pll->cached_cfg0);
 		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1,
 					pll->cached_cfg1);
+		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
+					pll->cached_outdiv);
 	}
 
 	rc = dsi_pll_enable(vco);
@@ -855,176 +975,6 @@
 	return rc;
 }
 
-static int post_vco_clk_get_div(void *context, unsigned int reg,
-			unsigned int *div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-	u32 reg_val;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
-	reg_val &= 0x3;
-
-	if (reg_val == 2)
-		*div = 1;
-	else if (reg_val == 3)
-		*div = 4;
-	else
-		*div = 1;
-
-	/**
-	 *Common clock framework the divider value is interpreted as one less
-	 * hence we return one less for all dividers except when zero
-	 */
-	if (*div != 0)
-		*div -= 1;
-
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
-	u32 reg_val;
-	int rc = 0;
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
-	reg_val &= ~0x03;
-	if (div == 1) {
-		reg_val |= 0x2;
-	} else if (div == 4) {
-		reg_val |= 0x3;
-	} else {
-		rc = -EINVAL;
-		pr_err("unsupported divider %d\n", div);
-		goto error;
-	}
-
-	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
-
-error:
-	return rc;
-}
-
-static int post_vco_clk_set_div(void *context, unsigned int reg,
-		unsigned int div)
-{
-	int rc = 0;
-	struct mdss_pll_resources *pll = context;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	/**
-	 * In common clock framework the divider value provided is one less and
-	 * and hence adjusting the divider value by one prior to writing it to
-	 * hardware
-	 */
-	div++;
-	rc = post_vco_clk_set_div_sub(pll, div);
-	if (!rc && pll->slave)
-		rc = post_vco_clk_set_div_sub(pll->slave, div);
-
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static int post_bit_clk_get_div(void *context, unsigned int reg,
-			unsigned int *div)
-{
-	int rc;
-	struct mdss_pll_resources *pll = context;
-	u32 reg_val;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
-	reg_val &= 0x3;
-
-	if (reg_val == 0)
-		*div = 1;
-	else if (reg_val == 1)
-		*div = 2;
-	else
-		*div = 1;
-
-	/**
-	 *Common clock framework the divider value is interpreted as one less
-	 * hence we return one less for all dividers except when zero
-	 */
-	if (*div != 0)
-		*div -= 1;
-
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
-static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
-{
-	int rc = 0;
-	u32 reg_val;
-
-	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
-	reg_val &= ~0x03;
-	if (div == 1) {
-		reg_val |= 0x0;
-	} else if (div == 2) {
-		reg_val |= 0x1;
-	} else {
-		rc = -EINVAL;
-		pr_err("unsupported divider %d\n", div);
-		goto error;
-	}
-
-	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
-
-error:
-	return rc;
-}
-
-static int post_bit_clk_set_div(void *context, unsigned int reg,
-		unsigned int div)
-{
-	int rc = 0;
-	struct mdss_pll_resources *pll = context;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
-		return rc;
-	}
-
-	/**
-	 * In common clock framework the divider value provided is one less and
-	 * and hence adjusting the divider value by one prior to writing it to
-	 * hardware
-	 */
-	div++;
-	rc = post_bit_clk_set_div_sub(pll, div);
-	if (!rc && pll->slave)
-		rc = post_bit_clk_set_div_sub(pll->slave, div);
-
-	(void)mdss_pll_resource_enable(pll, false);
-
-	return rc;
-}
-
 static struct regmap_config dsi_pll_10nm_config = {
 	.reg_bits = 32,
 	.reg_stride = 4,
@@ -1032,14 +982,14 @@
 	.max_register = 0x7c0,
 };
 
-static struct regmap_bus post_vco_regmap_bus = {
-	.reg_write = post_vco_clk_set_div,
-	.reg_read = post_vco_clk_get_div,
+static struct regmap_bus pll_regmap_bus = {
+	.reg_write = pll_reg_write,
+	.reg_read = pll_reg_read,
 };
 
-static struct regmap_bus post_bit_regmap_bus = {
-	.reg_write = post_bit_clk_set_div,
-	.reg_read = post_bit_clk_get_div,
+static struct regmap_bus pclk_mux_regmap_bus = {
+	.reg_read = phy_reg_read,
+	.reg_write = pclk_mux_write_sel,
 };
 
 static struct regmap_bus pclk_src_regmap_bus = {
@@ -1073,23 +1023,30 @@
  *                  |    vco_clk    |
  *                  +-------+-------+
  *                          |
- *                          +--------------------------------------+
- *                          |                                      |
- *                  +-------v-------+                              |
- *                  |  bitclk_src   |                              |
- *                  |  DIV(1..15)   |                              |
- *                  +-------+-------+                              |
- *                          |                                      |
- *                          +--------------------+                 |
- *   Shadow Path            |                    |                 |
- *       +          +-------v-------+     +------v------+   +------v-------+
- *       |          |  byteclk_src  |     |post_bit_div |   |post_vco_div  |
- *       |          |  DIV(8)       |     |DIV(1,2)     |   |DIV(1,4)      |
- *       |          +-------+-------+     +------+------+   +------+-------+
- *       |                  |                    |                 |
- *       |                  |                    +------+     +----+
- *       |         +--------+                           |     |
- *       |         |                               +----v-----v------+
+ *                          |
+ *                  +---------------+
+ *                  |  pll_out_div  |
+ *                  |  DIV(1,2,4,8) |
+ *                  +-------+-------+
+ *                          |
+ *                          +-----------------------------+--------+
+ *                          |                             |        |
+ *                  +-------v-------+                     |        |
+ *                  |  bitclk_src   |                     |        |
+ *                  |  DIV(1..15)   |                     |        |
+ *                  +-------+-------+                     |        |
+ *                          |                             |        |
+ *                          +----------+---------+        |        |
+ *   Shadow Path            |          |         |        |        |
+ *       +          +-------v-------+  |  +------v------+ | +------v-------+
+ *       |          |  byteclk_src  |  |  |post_bit_div | | |post_vco_div  |
+ *       |          |  DIV(8)       |  |  |DIV (2)      | | |DIV(4)        |
+ *       |          +-------+-------+  |  +------+------+ | +------+-------+
+ *       |                  |          |         |      | |        |
+ *       |                  |          |         +------+ |        |
+ *       |                  |          +-------------+  | |   +----+
+ *       |         +--------+                        |  | |   |
+ *       |         |                               +-v--v-v---v------+
  *     +-v---------v----+                           \  pclk_src_mux /
  *     \  byteclk_mux /                              \             /
  *      \            /                                +-----+-----+
@@ -1140,13 +1097,45 @@
 	},
 };
 
+static struct clk_regmap_div dsi0pll_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pll_out_div",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pll_out_div",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_bitclk_src = {
 	.shift = 0,
 	.width = 4,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0pll_bitclk_src",
-			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
 			.num_parents = 1,
 			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
 			.ops = &clk_regmap_div_ops,
@@ -1160,7 +1149,7 @@
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1pll_bitclk_src",
-			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
 			.num_parents = 1,
 			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
 			.ops = &clk_regmap_div_ops,
@@ -1168,31 +1157,27 @@
 	},
 };
 
-static struct clk_regmap_div dsi0pll_post_vco_div = {
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_post_vco_div",
-			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
+static struct clk_fixed_factor dsi0pll_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_post_vco_div",
+		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
 	},
 };
 
-static struct clk_regmap_div dsi1pll_post_vco_div = {
-	.shift = 0,
-	.width = 2,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_post_vco_div",
-			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
+static struct clk_fixed_factor dsi1pll_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_post_vco_div",
+		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
 	},
 };
 
@@ -1220,31 +1205,27 @@
 	},
 };
 
-static struct clk_regmap_div dsi0pll_post_bit_div = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi0pll_post_bit_div",
-			.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
+static struct clk_fixed_factor dsi0pll_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_post_bit_div",
+		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
 	},
 };
 
-static struct clk_regmap_div dsi1pll_post_bit_div = {
-	.shift = 0,
-	.width = 1,
-	.clkr = {
-		.hw.init = &(struct clk_init_data){
-			.name = "dsi1pll_post_bit_div",
-			.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
-			.num_parents = 1,
-			.flags = CLK_GET_RATE_NOCACHE,
-			.ops = &clk_regmap_div_ops,
-		},
+static struct clk_fixed_factor dsi1pll_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_post_bit_div",
+		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
 	},
 };
 
@@ -1277,30 +1258,36 @@
 };
 
 static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
 	.shift = 0,
-	.width = 1,
+	.width = 2,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi0pll_pclk_src_mux",
-			.parent_names = (const char *[]){"dsi0pll_post_bit_div",
-						"dsi0pll_post_vco_div"},
-			.num_parents = 2,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi0pll_bitclk_src",
+					"dsi0pll_post_bit_div",
+					"dsi0pll_pll_out_div",
+					"dsi0pll_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
 };
 
 static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
 	.shift = 0,
-	.width = 1,
+	.width = 2,
 	.clkr = {
 		.hw.init = &(struct clk_init_data){
 			.name = "dsi1pll_pclk_src_mux",
-			.parent_names = (const char *[]){"dsi1pll_post_bit_div",
-						"dsi1pll_post_vco_div"},
-			.num_parents = 2,
-			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.parent_names = (const char *[]){"dsi1pll_bitclk_src",
+					"dsi1pll_post_bit_div",
+					"dsi1pll_pll_out_div",
+					"dsi1pll_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
 			.ops = &clk_regmap_mux_closest_ops,
 		},
 	},
@@ -1366,24 +1353,25 @@
 
 static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
 	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+	[PLL_OUT_DIV_0_CLK] = &dsi0pll_pll_out_div.clkr.hw,
 	[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
 	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
-	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.clkr.hw,
-	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.clkr.hw,
+	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.hw,
+	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.hw,
 	[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
 	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
 	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
 	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
 	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
 	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
-	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.clkr.hw,
-	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.clkr.hw,
+	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.hw,
+	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.hw,
 	[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
 	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
 	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
 	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
-
 };
 
 int dsi_pll_clock_register_10nm(struct platform_device *pdev,
@@ -1428,13 +1416,10 @@
 
 	/* Establish client data */
 	if (ndx == 0) {
-		rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi0pll_post_vco_div.clkr.regmap = rmap;
 
-		rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
-		dsi0pll_post_bit_div.clkr.regmap = rmap;
+		dsi0pll_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
@@ -1448,10 +1433,9 @@
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pclk_mux.clkr.regmap = rmap;
 
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+		rmap = devm_regmap_init(&pdev->dev, &pclk_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
-
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi0pll_byteclk_mux.clkr.regmap = rmap;
@@ -1475,13 +1459,9 @@
 
 
 	} else {
-		rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
-		dsi1pll_post_vco_div.clkr.regmap = rmap;
-
-		rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
-				pll_res, &dsi_pll_10nm_config);
-		dsi1pll_post_bit_div.clkr.regmap = rmap;
+		dsi1pll_pll_out_div.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
@@ -1491,14 +1471,13 @@
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_src.clkr.regmap = rmap;
 
-		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+		rmap = devm_regmap_init(&pdev->dev, &pclk_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_mux.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
-
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_10nm_config);
 		dsi1pll_byteclk_mux.clkr.regmap = rmap;
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 033462d..2f92270 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -99,6 +99,7 @@
 	unsigned long	vco_cached_rate;
 	u32		cached_cfg0;
 	u32		cached_cfg1;
+	u32		cached_outdiv;
 
 	/* dsi/edp/hmdi pll interface type */
 	u32		pll_interface_type;
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index e11ea50..8286818 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1031,9 +1031,7 @@
 	if (predicted && (idx < (cluster->nlevels - 1))) {
 		struct power_params *pwr_params = &cluster->levels[idx].pwr;
 
-		tick_broadcast_exit();
 		clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
-		tick_broadcast_enter();
 	}
 
 	return 0;
@@ -1086,10 +1084,8 @@
 			struct power_params *pwr_params =
 						&cluster->levels[0].pwr;
 
-			tick_broadcast_exit();
 			clusttimer_start(cluster,
 					pwr_params->max_residency + tmr_add);
-			tick_broadcast_enter();
 		}
 	}
 
@@ -1196,9 +1192,6 @@
 	 * next wakeup within a cluster, in which case, CPU switches over to
 	 * use broadcast timer.
 	 */
-	if (from_idle && cpu_level->use_bc_timer)
-		tick_broadcast_enter();
-
 	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
 		|| (cpu_level->mode ==
 			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
@@ -1218,9 +1211,6 @@
 	struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index];
 	bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore;
 
-	if (from_idle && cpu_level->use_bc_timer)
-		tick_broadcast_exit();
-
 	if (from_idle && ((cpu_level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
 		|| (cpu_level->mode ==
 			MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)
@@ -1272,6 +1262,11 @@
 	/*
 	 * idx = 0 is the default LPM state
 	 */
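+	/*
+	 * Levels that need the broadcast timer switch over before entering
+	 * the low power state; abort the entry if the switch fails.
+	 */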
+	if (from_idle && cpu->levels[idx].use_bc_timer) {
+		if (tick_broadcast_enter())
+			return false;
+	}
+
 	if (!idx) {
 		stop_critical_timings();
 		wfi();
@@ -1290,6 +1285,10 @@
 	start_critical_timings();
 	update_debug_pc_event(CPU_EXIT, state_id,
 			success, 0xdeaffeed, true);
+
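+	/* Switch back to the local timer after exiting the low power state. */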
+	if (from_idle && cpu->levels[idx].use_bc_timer)
+		tick_broadcast_exit();
+
 	return success;
 }
 
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 97e3479..6fcf25f 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1000,7 +1000,9 @@
 	ctx->flags |= SHA_FLAGS_FINUP;
 
 	err1 = atmel_sha_update(req);
-	if (err1 == -EINPROGRESS || err1 == -EBUSY)
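+	/*
+	 * -EBUSY only means the request was queued when the backlog flag is
+	 * set; without it, -EBUSY is a real error.
+	 */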
+	if (err1 == -EINPROGRESS ||
+	    (err1 == -EBUSY && (ahash_request_flags(req) &
+				CRYPTO_TFM_REQ_MAY_BACKLOG)))
 		return err1;
 
 	/*
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 3bda6e5..0d743c6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2014,10 +2014,10 @@
 {
 	struct ablkcipher_request *req = context;
 	struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
+#ifdef DEBUG
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
@@ -2037,6 +2037,14 @@
 #endif
 
 	ablkcipher_unmap(jrdev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block. This is used e.g. by the CTS mode.
+	 */
+	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+				 ivsize, 0);
+
 	kfree(edesc);
 
 	ablkcipher_request_complete(req, err);
@@ -2047,10 +2055,10 @@
 {
 	struct ablkcipher_request *req = context;
 	struct ablkcipher_edesc *edesc;
-#ifdef DEBUG
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
+#ifdef DEBUG
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
@@ -2069,6 +2077,14 @@
 #endif
 
 	ablkcipher_unmap(jrdev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block.
+	 */
+	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+				 ivsize, 0);
+
 	kfree(edesc);
 
 	ablkcipher_request_complete(req, err);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 2474f14..631337c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -491,7 +491,7 @@
 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
 	if (!ret) {
 		/* in progress */
-		wait_for_completion_interruptible(&result.completion);
+		wait_for_completion(&result.completion);
 		ret = result.err;
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR,
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4f..3ce1d5c 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -103,7 +103,7 @@
 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
 	if (!ret) {
 		/* in progress */
-		wait_for_completion_interruptible(&result.completion);
+		wait_for_completion(&result.completion);
 		ret = result.err;
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index d9ebe113..82a316b 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -33,12 +33,17 @@
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <soc/qcom/socinfo.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
 
 #include "qce.h"
 #include "qce50.h"
 #include "qcryptohw_50.h"
 #include "qce_ota.h"
 
+#define CRYPTO_SMMU_IOVA_START 0x10000000
+#define CRYPTO_SMMU_IOVA_SIZE 0x40000000
+
 #define CRYPTO_CONFIG_RESET 0xE01EF
 #define MAX_SPS_DESC_FIFO_SIZE 0xfff0
 #define QCE_MAX_NUM_DSCR    0x200
@@ -156,6 +161,8 @@
 	atomic_t last_intr_seq;
 	bool cadence_flag;
 	uint8_t *dummyreq_in_buf;
+	struct dma_iommu_mapping *smmu_mapping;
+	bool bypass_s1_smmu;
 };
 
 static void print_notify_debug(struct sps_event_notify *notify);
@@ -5703,6 +5710,10 @@
 		pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
 		pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
 	}
+
+	if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-bypass"))
+		pce_dev->bypass_s1_smmu = true;
+
 	pce_dev->ce_bam_info.dest_pipe_index	=
 			2 * pce_dev->ce_bam_info.pipe_pair_index;
 	pce_dev->ce_bam_info.src_pipe_index	=
@@ -5936,6 +5947,48 @@
 	return 0;
 }
 
+static void qce_iommu_release_iomapping(struct qce_device *pce_dev)
+{
+	if (pce_dev->smmu_mapping)
+		arm_iommu_release_mapping(pce_dev->smmu_mapping);
+
+	pce_dev->smmu_mapping = NULL;
+}
+
+static int qce_smmu_init(struct qce_device *pce_dev)
+{
+	struct dma_iommu_mapping *mapping;
+	int s1_bypass = 1;
+	int ret = 0;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type,
+				CRYPTO_SMMU_IOVA_START, CRYPTO_SMMU_IOVA_SIZE);
+	if (IS_ERR(mapping)) {
+		ret = PTR_ERR(mapping);
+		pr_err("Create mapping failed, err = %d\n", ret);
+		return ret;
+	}
+
+	ret = iommu_domain_set_attr(mapping->domain,
+				DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+	if (ret < 0) {
+		pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
+		goto ext_fail_set_attr;
+	}
+
+	ret = arm_iommu_attach_device(pce_dev->pdev, mapping);
+	if (ret < 0) {
+		pr_err("Attach device failed, err = %d\n", ret);
+		goto ext_fail_set_attr;
+	}
+	pce_dev->smmu_mapping = mapping;
+	return ret;
+
+ext_fail_set_attr:
+	qce_iommu_release_iomapping(pce_dev);
+	return ret;
+}
+
 /* crypto engine open function. */
 void *qce_open(struct platform_device *pdev, int *rc)
 {
@@ -5993,6 +6046,13 @@
 	if (*rc)
 		goto err_enable_clk;
 
+	if (pce_dev->bypass_s1_smmu) {
+		if (qce_smmu_init(pce_dev)) {
+			*rc = -EIO;
+			goto err_smmu;
+		}
+	}
+
 	if (_probe_ce_engine(pce_dev)) {
 		*rc = -ENXIO;
 		goto err;
@@ -6019,6 +6079,9 @@
 	mutex_unlock(&qce_iomap_mutex);
 	return pce_dev;
 err:
+	if (pce_dev->bypass_s1_smmu)
+		qce_iommu_release_iomapping(pce_dev);
+err_smmu:
 	qce_disable_clk(pce_dev);
 
 err_enable_clk:
@@ -6060,6 +6123,9 @@
 	kfree(pce_dev->dummyreq_in_buf);
 	kfree(pce_dev->iovec_vmem);
 
+	if (pce_dev->bypass_s1_smmu)
+		qce_iommu_release_iomapping(pce_dev);
+
 	qce_disable_clk(pce_dev);
 	__qce_deinit_clk(pce_dev);
 	mutex_unlock(&qce_iomap_mutex);
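For reference, the stage-1 bypass setup added to qce50.c above boils down to the short sequence below. This is an illustrative sketch only: the device pointer and IOVA window are placeholders, and it assumes only the arm_iommu_*() and iommu_domain_set_attr() calls already used in the hunk.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <asm/dma-iommu.h>

/* Illustrative only: attach @dev to an SMMU context with stage-1 bypassed. */
static int example_attach_s1_bypass(struct device *dev,
				    dma_addr_t base, size_t size)
{
	struct dma_iommu_mapping *mapping;
	int s1_bypass = 1;
	int ret;

	/* Reserve the IOVA window [base, base + size) for this device. */
	mapping = arm_iommu_create_mapping(&platform_bus_type, base, size);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Request that the IOMMU driver bypass stage-1 translation. */
	ret = iommu_domain_set_attr(mapping->domain,
				    DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
	if (ret)
		goto release;

	/* Route the device's DMA API traffic through the new mapping. */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret)
		goto release;

	return 0;

release:
	arm_iommu_release_mapping(mapping);
	return ret;
}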
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f..571de2f 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -816,7 +816,7 @@
  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
  */
 #define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
-#define TALITOS_MAX_KEY_SIZE		96
+#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 
 struct talitos_ctx {
@@ -1495,6 +1495,11 @@
 {
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 
+	if (keylen > TALITOS_MAX_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
 	memcpy(&ctx->key, key, keylen);
 	ctx->keylen = keylen;
 
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index f5bb3ed..5ca93a6 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#include <linux/cpu_pm.h>
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
 
@@ -125,6 +126,7 @@
 struct erp_drvdata {
 	struct edac_device_ctl_info *edev_ctl;
 	struct erp_drvdata __percpu **erp_cpu_drvdata;
+	struct notifier_block nb_pm;
 	int ppi;
 };
 
@@ -358,6 +360,19 @@
 	return IRQ_HANDLED;
 }
 
+static int kryo3xx_pmu_cpu_pm_notify(struct notifier_block *self,
+				unsigned long action, void *v)
+{
+	switch (action) {
+	case CPU_PM_EXIT:
+		kryo3xx_check_l3_scu_error(panic_handler_drvdata->edev_ctl);
+		kryo3xx_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 static void initialize_registers(void *info)
 {
 	set_errxctlr_el1();
@@ -400,6 +415,7 @@
 	drv->edev_ctl->ctl_name = "cache";
 	drv->edev_ctl->panic_on_ce = ARM64_ERP_PANIC_ON_CE;
 	drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
+	drv->nb_pm.notifier_call = kryo3xx_pmu_cpu_pm_notify;
 	platform_set_drvdata(pdev, drv);
 
 	rc = edac_device_add_device(drv->edev_ctl);
@@ -424,6 +440,8 @@
 		goto out_dev;
 	}
 
+	cpu_pm_register_notifier(&(drv->nb_pm));
+
 	return 0;
 
 out_dev:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dcaf691..264899d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1419,6 +1419,9 @@
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	if (*pos >= adev->mc.mc_vram_size)
+		return -ENXIO;
+
 	while (size) {
 		unsigned long flags;
 		uint32_t value;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index f59771d..db7890c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@
 			return false;
 		}
 
+		/*
+		 * ignore out-of-order messages or messages that are part of a
+		 * failed transaction
+		 */
+		if (!recv_hdr.somt && !msg->have_somt)
+			return false;
+
 		/* get length contained in this portion */
 		msg->curchunk_len = recv_hdr.msg_len;
 		msg->curchunk_hdrlen = hdrlen;
@@ -2168,7 +2175,7 @@
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
 	int len;
 	u8 replyblock[32];
@@ -2183,12 +2190,12 @@
 			       replyblock, len);
 	if (ret != len) {
 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
-		return;
+		return false;
 	}
 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
 	if (!ret) {
 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
-		return;
+		return false;
 	}
 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
@@ -2200,21 +2207,32 @@
 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
 				    replyblock, len);
 		if (ret != len) {
-			DRM_DEBUG_KMS("failed to read a chunk\n");
+			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+				      len, ret);
+			return false;
 		}
+
 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
-		if (ret == false)
+		if (!ret) {
 			DRM_DEBUG_KMS("failed to build sideband msg\n");
+			return false;
+		}
+
 		curreply += len;
 		replylen -= len;
 	}
+	return true;
 }
 
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
 
-	drm_dp_get_one_sb_msg(mgr, false);
+	if (!drm_dp_get_one_sb_msg(mgr, false)) {
+		memset(&mgr->down_rep_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->down_rep_recv.have_eomt) {
 		struct drm_dp_sideband_msg_tx *txmsg;
@@ -2270,7 +2288,12 @@
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, true);
+
+	if (!drm_dp_get_one_sb_msg(mgr, true)) {
+		memset(&mgr->up_req_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
@@ -2322,7 +2345,9 @@
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
 
-		drm_dp_put_mst_branch_device(mstb);
+		if (mstb)
+			drm_dp_put_mst_branch_device(mstb);
+
 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 	}
 	return ret;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index 3625ed0..5e76ce7 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -56,6 +56,8 @@
 		dsi_ctrl_hw_cmn_trigger_cmd_test_pattern;
 	ctrl->ops.clear_phy0_ln_err = dsi_ctrl_hw_dln0_phy_err;
 	ctrl->ops.phy_reset_config = dsi_ctrl_hw_cmn_phy_reset_config;
+	ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr;
+	ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr;
 
 	switch (version) {
 	case DSI_CTRL_VERSION_1_4:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 2d7b174..e8a6ab4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -147,6 +147,12 @@
 void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
 void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl);
 
+void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
+			enum dsi_op_mode panel_mode,
+			bool enable, u32 frame_count);
+u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
+			enum dsi_op_mode panel_mode);
+
 void dsi_ctrl_hw_cmn_kickoff_command(struct dsi_ctrl_hw *ctrl,
 			struct dsi_ctrl_cmd_dma_info *cmd,
 			u32 flags);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index b2aef9c..21ef811 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -2258,6 +2258,28 @@
 }
 
 /**
+ * _dsi_ctrl_cache_misr - Cache frame MISR value
+ * @dsi_ctrl: Pointer to associated dsi_ctrl structure
+ */
+static void _dsi_ctrl_cache_misr(struct dsi_ctrl *dsi_ctrl)
+{
+	u32 misr;
+
+	if (!dsi_ctrl || !dsi_ctrl->hw.ops.collect_misr)
+		return;
+
+	misr = dsi_ctrl->hw.ops.collect_misr(&dsi_ctrl->hw,
+				dsi_ctrl->host_config.panel_mode);
+
+	if (misr)
+		dsi_ctrl->misr_cache = misr;
+
+	pr_debug("DSI_%d misr_cache = %x\n", dsi_ctrl->cell_index,
+		dsi_ctrl->misr_cache);
+}
+
+/**
  * dsi_ctrl_set_power_state() - set power state for dsi controller
  * @dsi_ctrl:          DSI controller handle.
  * @state:             Power state.
@@ -2295,6 +2317,9 @@
 			goto error;
 		}
 	} else if (state == DSI_CTRL_POWER_VREG_OFF) {
+		if (dsi_ctrl->misr_enable)
+			_dsi_ctrl_cache_misr(dsi_ctrl);
+
 		rc = dsi_ctrl_enable_supplies(dsi_ctrl, false);
 		if (rc) {
 			pr_err("[%d]failed to disable vreg supplies, rc=%d\n",
@@ -2609,6 +2634,59 @@
 }
 
 /**
+ * dsi_ctrl_setup_misr() - Setup frame MISR
+ * @dsi_ctrl:              DSI controller handle.
+ * @enable:                enable/disable MISR.
+ * @frame_count:           Number of frames to accumulate MISR.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
+			bool enable,
+			u32 frame_count)
+{
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!dsi_ctrl->hw.ops.setup_misr)
+		return 0;
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	dsi_ctrl->misr_enable = enable;
+	dsi_ctrl->hw.ops.setup_misr(&dsi_ctrl->hw,
+			dsi_ctrl->host_config.panel_mode,
+			enable, frame_count);
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return 0;
+}
+
+/**
+ * dsi_ctrl_collect_misr() - Read frame MISR
+ * @dsi_ctrl:              DSI controller handle.
+ *
+ * Return: MISR value.
+ */
+u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl)
+{
+	u32 misr;
+
+	if (!dsi_ctrl || !dsi_ctrl->hw.ops.collect_misr)
+		return 0;
+
+	misr = dsi_ctrl->hw.ops.collect_misr(&dsi_ctrl->hw,
+				dsi_ctrl->host_config.panel_mode);
+	if (!misr)
+		misr = dsi_ctrl->misr_cache;
+
+	pr_debug("DSI_%d cached misr = %x, final = %x\n",
+		dsi_ctrl->cell_index, dsi_ctrl->misr_cache, misr);
+
+	return misr;
+}
+
+/**
  * dsi_ctrl_drv_register() - register platform driver for dsi controller
  */
 void dsi_ctrl_drv_register(void)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index ec535ce11..95dac1c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -188,6 +188,8 @@
  * @vaddr:		 CPU virtual address of cmd buffer.
  * @cmd_buffer_size:     Size of command buffer.
  * @debugfs_root:        Root for debugfs entries.
+ * @misr_enable:         Frame MISR enable/disable
+ * @misr_cache:          Cached Frame MISR value
  */
 struct dsi_ctrl {
 	struct platform_device *pdev;
@@ -226,6 +228,10 @@
 	/* Debug Information */
 	struct dentry *debugfs_root;
 
+	/* MISR */
+	bool misr_enable;
+	u32 misr_cache;
+
 };
 
 /**
@@ -571,6 +577,26 @@
 		struct dsi_ctrl *dsi_ctrl, uint32_t intr_idx);
 
 /**
+ * dsi_ctrl_setup_misr() - Setup frame MISR
+ * @dsi_ctrl:              DSI controller handle.
+ * @enable:                enable/disable MISR.
+ * @frame_count:           Number of frames to accumulate MISR.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_setup_misr(struct dsi_ctrl *dsi_ctrl,
+			bool enable,
+			u32 frame_count);
+
+/**
+ * dsi_ctrl_collect_misr() - Read frame MISR
+ * @dsi_ctrl:              DSI controller handle.
+ *
+ * Return: MISR value.
+ */
+u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl);
+
+/**
  * dsi_ctrl_drv_register() - register platform driver for dsi controller
  */
 void dsi_ctrl_drv_register(void);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 74be279..2130144 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -675,6 +675,26 @@
 	ssize_t (*reg_dump_to_buffer)(struct dsi_ctrl_hw *ctrl,
 				      char *buf,
 				      u32 size);
+
+	/**
+	 * setup_misr() - Setup frame MISR
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @panel_mode:   CMD or VIDEO mode indicator
+	 * @enable:       Enable/disable MISR.
+	 * @frame_count:  Number of frames to accumulate MISR.
+	 */
+	void (*setup_misr)(struct dsi_ctrl_hw *ctrl,
+			   enum dsi_op_mode panel_mode,
+			   bool enable, u32 frame_count);
+
+	/**
+	 * collect_misr() - Read frame MISR
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @panel_mode:   CMD or VIDEO mode indicator
+	 */
+	u32 (*collect_misr)(struct dsi_ctrl_hw *ctrl,
+			    enum dsi_op_mode panel_mode);
+
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
index c22849a..6421dc2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_2_0.c
@@ -157,6 +157,10 @@
 	len += snprintf((buf + len), (size - len),
 			DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
 	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_MISR_CMD_CTRL));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_MISR_VIDEO_CTRL));
+	len += snprintf((buf + len), (size - len),
 			DUMP_REG_VALUE(DSI_LANE_STATUS));
 	len += snprintf((buf + len), (size - len),
 			DUMP_REG_VALUE(DSI_LANE_CTRL));
@@ -193,6 +197,12 @@
 	len += snprintf((buf + len), (size - len),
 			DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
 	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_MISR_CMD_MDP0_32BIT));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_MISR_CMD_MDP1_32BIT));
+	len += snprintf((buf + len), (size - len),
+			DUMP_REG_VALUE(DSI_MISR_VIDEO_32BIT));
+	len += snprintf((buf + len), (size - len),
 			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
 	len += snprintf((buf + len), (size - len),
 			DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 0af6f25..8e8e353 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -20,9 +20,12 @@
 #include "dsi_ctrl_reg.h"
 #include "dsi_hw.h"
 #include "dsi_panel.h"
+#include "dsi_catalog.h"
 
 #define MMSS_MISC_CLAMP_REG_OFF           0x0014
 #define DSI_CTRL_DYNAMIC_FORCE_ON         (0x23F|BIT(8)|BIT(9)|BIT(11)|BIT(21))
+#define DSI_CTRL_CMD_MISR_ENABLE          BIT(28)
+#define DSI_CTRL_VIDEO_MISR_ENABLE        BIT(16)
 
 /* Unsupported formats default to RGB888 */
 static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
@@ -149,6 +152,70 @@
 }
 
 /**
+ * setup_misr() - Setup frame MISR
+ * @ctrl:	  Pointer to the controller host hardware.
+ * @panel_mode:   CMD or VIDEO mode indicator
+ * @enable:	  Enable/disable MISR.
+ * @frame_count:  Number of frames to accumulate MISR.
+ */
+void dsi_ctrl_hw_cmn_setup_misr(struct dsi_ctrl_hw *ctrl,
+			enum dsi_op_mode panel_mode,
+			bool enable,
+			u32 frame_count)
+{
+	u32 addr;
+	u32 config = 0;
+
+	if (panel_mode == DSI_OP_CMD_MODE) {
+		addr = DSI_MISR_CMD_CTRL;
+		if (enable)
+			config = DSI_CTRL_CMD_MISR_ENABLE;
+	} else {
+		addr = DSI_MISR_VIDEO_CTRL;
+		if (enable)
+			config = DSI_CTRL_VIDEO_MISR_ENABLE;
+		if (frame_count > 255)
+			frame_count = 255;
+		config |= frame_count << 8;
+	}
+
+	pr_debug("[DSI_%d] MISR ctrl: 0x%x\n", ctrl->index,
+			config);
+	DSI_W32(ctrl, addr, config);
+	wmb(); /* make sure MISR is configured */
+}
+
+/**
+ * collect_misr() - Read frame MISR
+ * @ctrl:	  Pointer to the controller host hardware.
+ * @panel_mode:   CMD or VIDEO mode indicator
+ */
+u32 dsi_ctrl_hw_cmn_collect_misr(struct dsi_ctrl_hw *ctrl,
+			enum dsi_op_mode panel_mode)
+{
+	u32 addr;
+	u32 enabled;
+	u32 misr = 0;
+
+	if (panel_mode == DSI_OP_CMD_MODE) {
+		addr = DSI_MISR_CMD_MDP0_32BIT;
+		enabled = DSI_R32(ctrl, DSI_MISR_CMD_CTRL) &
+				DSI_CTRL_CMD_MISR_ENABLE;
+	} else {
+		addr = DSI_MISR_VIDEO_32BIT;
+		enabled = DSI_R32(ctrl, DSI_MISR_VIDEO_CTRL) &
+				DSI_CTRL_VIDEO_MISR_ENABLE;
+	}
+
+	if (enabled)
+		misr = DSI_R32(ctrl, addr);
+
+	pr_debug("[DSI_%d] MISR enabled %x value: 0x%x\n", ctrl->index,
+			enabled, misr);
+	return misr;
+}
+
+/**
  * set_video_timing() - set up the timing for video frame
  * @ctrl:          Pointer to controller host hardware.
  * @mode:          Video mode information.
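As a reading aid for the MISR plumbing above, here is a minimal caller-side sketch using the new dsi_ctrl_setup_misr()/dsi_ctrl_collect_misr() helpers. It assumes DSI core clocks are already enabled and that @ctrl is a valid controller handle; the frame count is arbitrary.

#include "dsi_ctrl.h"

/*
 * Illustrative only: accumulate the frame MISR over a few frames and read
 * back the resulting signature.
 */
static u32 example_read_frame_misr(struct dsi_ctrl *ctrl)
{
	u32 misr;

	/* Start accumulation; video mode caps frame_count at 255. */
	if (dsi_ctrl_setup_misr(ctrl, true, 5))
		return 0;

	/* ... wait for at least the requested number of frames here ... */

	/* Falls back to the cached value when the hardware reads back zero. */
	misr = dsi_ctrl_collect_misr(ctrl);

	/* Stop accumulation again. */
	dsi_ctrl_setup_misr(ctrl, false, 0);

	return misr;
}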
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index a4a9fb5..195584f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -32,6 +32,8 @@
 #define to_dsi_display(x) container_of(x, struct dsi_display, host)
 #define INT_BASE_10 10
 
+#define MISR_BUFF_SIZE	256
+
 static DEFINE_MUTEX(dsi_display_list_lock);
 static LIST_HEAD(dsi_display_list);
 static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
@@ -128,9 +130,45 @@
 	return format;
 }
 
+static void _dsi_display_setup_misr(struct dsi_display *display)
+{
+	int i;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		dsi_ctrl_setup_misr(display->ctrl[i].ctrl,
+				display->misr_enable,
+				display->misr_frame_count);
+	}
+}
+
+int dsi_display_set_power(struct drm_connector *connector,
+		int power_mode, void *disp)
+{
+	struct dsi_display *display = disp;
+	int rc = 0;
+
+	if (!display || !display->panel) {
+		pr_err("invalid display/panel\n");
+		return -EINVAL;
+	}
+
+	switch (power_mode) {
+	case SDE_MODE_DPMS_LP1:
+		rc = dsi_panel_set_lp1(display->panel);
+		break;
+	case SDE_MODE_DPMS_LP2:
+		rc = dsi_panel_set_lp2(display->panel);
+		break;
+	default:
+		rc = dsi_panel_set_nolp(display->panel);
+		break;
+	}
+	return rc;
+}
+
 static ssize_t debugfs_dump_info_read(struct file *file,
-				      char __user *buff,
-				      size_t count,
+				      char __user *user_buf,
+				      size_t user_len,
 				      loff_t *ppos)
 {
 	struct dsi_display *display = file->private_data;
@@ -168,7 +206,7 @@
 			"\tClock master = %s\n",
 			display->ctrl[display->clk_master_idx].ctrl->name);
 
-	if (copy_to_user(buff, buf, len)) {
+	if (copy_to_user(user_buf, buf, len)) {
 		kfree(buf);
 		return -EFAULT;
 	}
@@ -179,16 +217,151 @@
 	return len;
 }
 
+static ssize_t debugfs_misr_setup(struct file *file,
+				  const char __user *user_buf,
+				  size_t user_len,
+				  loff_t *ppos)
+{
+	struct dsi_display *display = file->private_data;
+	char *buf;
+	int rc = 0;
+	size_t len;
+	u32 enable, frame_count;
+
+	if (!display)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(MISR_BUFF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* leave room for termination char */
+	len = min_t(size_t, user_len, MISR_BUFF_SIZE - 1);
+	if (copy_from_user(buf, user_buf, len)) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	buf[len] = '\0'; /* terminate the string */
+
+	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+	display->misr_enable = enable;
+	display->misr_frame_count = frame_count;
+
+	mutex_lock(&display->display_lock);
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_CORE_CLK, DSI_CLK_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+		       display->name, rc);
+		goto unlock;
+	}
+
+	_dsi_display_setup_misr(display);
+
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_CORE_CLK, DSI_CLK_OFF);
+	if (rc) {
+		pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
+		       display->name, rc);
+		goto unlock;
+	}
+
+	rc = user_len;
+unlock:
+	mutex_unlock(&display->display_lock);
+error:
+	kfree(buf);
+	return rc;
+}
+
+static ssize_t debugfs_misr_read(struct file *file,
+				 char __user *user_buf,
+				 size_t user_len,
+				 loff_t *ppos)
+{
+	struct dsi_display *display = file->private_data;
+	char *buf;
+	u32 len = 0;
+	int rc = 0;
+	struct dsi_ctrl *dsi_ctrl;
+	int i;
+	u32 misr;
+	size_t max_len = min_t(size_t, user_len, MISR_BUFF_SIZE);
+
+	if (!display)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(max_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&display->display_lock);
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_CORE_CLK, DSI_CLK_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		dsi_ctrl = display->ctrl[i].ctrl;
+		misr = dsi_ctrl_collect_misr(display->ctrl[i].ctrl);
+
+		len += snprintf((buf + len), max_len - len,
+			"DSI_%d MISR: 0x%x\n", dsi_ctrl->cell_index, misr);
+
+		if (len >= max_len)
+			break;
+	}
+
+	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
+			DSI_CORE_CLK, DSI_CLK_OFF);
+	if (rc) {
+		pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	if (copy_to_user(user_buf, buf, len)) {
+		rc = -EFAULT;
+		goto error;
+	}
+
+	*ppos += len;
+
+error:
+	mutex_unlock(&display->display_lock);
+	kfree(buf);
+	return len;
+}
 
 static const struct file_operations dump_info_fops = {
 	.open = simple_open,
 	.read = debugfs_dump_info_read,
 };
 
+static const struct file_operations misr_data_fops = {
+	.open = simple_open,
+	.read = debugfs_misr_read,
+	.write = debugfs_misr_setup,
+};
+
 static int dsi_display_debugfs_init(struct dsi_display *display)
 {
 	int rc = 0;
-	struct dentry *dir, *dump_file;
+	struct dentry *dir, *dump_file, *misr_data;
 
 	dir = debugfs_create_dir(display->name, NULL);
 	if (IS_ERR_OR_NULL(dir)) {
@@ -199,13 +372,25 @@
 	}
 
 	dump_file = debugfs_create_file("dump_info",
-					0444,
+					0400,
 					dir,
 					display,
 					&dump_info_fops);
 	if (IS_ERR_OR_NULL(dump_file)) {
 		rc = PTR_ERR(dump_file);
-		pr_err("[%s] debugfs create file failed, rc=%d\n",
+		pr_err("[%s] debugfs create dump info file failed, rc=%d\n",
+		       display->name, rc);
+		goto error_remove_dir;
+	}
+
+	misr_data = debugfs_create_file("misr_data",
+					0600,
+					dir,
+					display,
+					&misr_data_fops);
+	if (IS_ERR_OR_NULL(misr_data)) {
+		rc = PTR_ERR(misr_data);
+		pr_err("[%s] debugfs create misr data file failed, rc=%d\n",
 		       display->name, rc);
 		goto error_remove_dir;
 	}
@@ -3533,6 +3718,10 @@
 {
 	int rc = 0;
 
+	/* check and setup MISR */
+	if (display->misr_enable)
+		_dsi_display_setup_misr(display);
+
 	rc = dsi_display_set_roi(display, params->rois);
 
 	return rc;
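The misr_data debugfs node added above accepts "<enable> <frame_count>" on write and reports one "DSI_<n> MISR: 0x<value>" line per controller on read. A hedged user-space sketch follows; the debugfs path is a placeholder, since the directory is created per display name.

#include <stdio.h>

/* Illustrative user-space sketch for exercising the misr_data node. */
int main(void)
{
	const char *node = "/sys/kernel/debug/<display-name>/misr_data";
	char line[64];
	FILE *f;

	/* Write "<enable> <frame_count>", as parsed by debugfs_misr_setup(). */
	f = fopen(node, "w");
	if (!f)
		return 1;
	fprintf(f, "1 5\n");
	fclose(f);

	/* Each controller reports one "DSI_<n> MISR: 0x<value>" line. */
	f = fopen(node, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	return 0;
}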
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index b382e4a..0ded247 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -154,6 +154,8 @@
  * @dsi_clk_handle:   DSI clock handle.
  * @mdp_clk_handle:   MDP clock handle.
  * @root:             Debugfs root directory
+ * @misr_enable:      Frame MISR enable/disable
+ * @misr_frame_count: Number of frames to accumulate the MISR value
  */
 struct dsi_display {
 	struct platform_device *pdev;
@@ -201,6 +203,9 @@
 
 	/* DEBUG FS */
 	struct dentry *root;
+
+	bool misr_enable;
+	u32 misr_frame_count;
 };
 
 int dsi_display_dev_probe(struct platform_device *pdev);
@@ -472,6 +477,22 @@
  */
 int dsi_display_soft_reset(void *display);
 
+/**
+ * dsi_display_set_power - update power/dpms setting
+ * @connector: Pointer to drm connector structure
+ * @power_mode: One of the following,
+ *              SDE_MODE_DPMS_ON
+ *              SDE_MODE_DPMS_LP1
+ *              SDE_MODE_DPMS_LP2
+ *              SDE_MODE_DPMS_STANDBY
+ *              SDE_MODE_DPMS_SUSPEND
+ *              SDE_MODE_DPMS_OFF
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int dsi_display_set_power(struct drm_connector *connector,
+		int power_mode, void *display);
+
 /*
  * dsi_display_pre_kickoff - program kickoff-time features
  * @display: Pointer to private display structure
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 8bc82f5..81da506 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1333,6 +1333,9 @@
 	"qcom,video-to-cmd-mode-switch-commands",
 	"qcom,video-to-cmd-mode-post-switch-commands",
 	"qcom,mdss-dsi-panel-status-command",
+	"qcom,mdss-dsi-lp1-command",
+	"qcom,mdss-dsi-lp2-command",
+	"qcom,mdss-dsi-nolp-command",
 	"PPS not parsed from DTSI, generated dynamically",
 	"ROI not parsed from DTSI, generated dynamically",
 };
@@ -1352,6 +1355,9 @@
 	"qcom,video-to-cmd-mode-switch-commands-state",
 	"qcom,video-to-cmd-mode-post-switch-commands-state",
 	"qcom,mdss-dsi-panel-status-command-state",
+	"qcom,mdss-dsi-lp1-command-state",
+	"qcom,mdss-dsi-lp2-command-state",
+	"qcom,mdss-dsi-nolp-command-state",
 	"PPS not parsed from DTSI, generated dynamically",
 	"ROI not parsed from DTSI, generated dynamically",
 };
@@ -2745,6 +2751,60 @@
 	return rc;
 }
 
+int dsi_panel_set_lp1(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_LP1);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_LP1 cmd, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_set_lp2(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_LP2);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_LP2 cmd, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+int dsi_panel_set_nolp(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_NOLP);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_NOLP cmd, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
 int dsi_panel_prepare(struct dsi_panel *panel)
 {
 	int rc = 0;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 5380049..ef9bb0c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -55,6 +55,9 @@
 	DSI_CMD_SET_VID_TO_CMD_SWITCH,
 	DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
 	DSI_CMD_SET_PANEL_STATUS,
+	DSI_CMD_SET_LP1,
+	DSI_CMD_SET_LP2,
+	DSI_CMD_SET_NOLP,
 	DSI_CMD_SET_PPS,
 	DSI_CMD_SET_ROI,
 	DSI_CMD_SET_MAX
@@ -230,6 +233,12 @@
 
 int dsi_panel_pre_prepare(struct dsi_panel *panel);
 
+int dsi_panel_set_lp1(struct dsi_panel *panel);
+
+int dsi_panel_set_lp2(struct dsi_panel *panel);
+
+int dsi_panel_set_nolp(struct dsi_panel *panel);
+
 int dsi_panel_prepare(struct dsi_panel *panel);
 
 int dsi_panel_enable(struct dsi_panel *panel);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index d97e4ef..f05d760 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -617,7 +617,7 @@
 			goto fail;
 		}
 
-		aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+		aspace = msm_gem_smmu_address_space_create(dev,
 				mmu, "mdp5");
 		if (IS_ERR(aspace)) {
 			ret = PTR_ERR(aspace);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d2ac684..33ef04b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -121,13 +121,37 @@
 		msm_drm_helper_hotplug_event(dev);
 }
 
+/**
+ * msm_atomic_check - validate atomic state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Wrapper around drm_atomic_helper_check() that performs the modeset and
+ * per-plane state checks. In addition, it checks whether any secure
+ * transition (moving a CRTC and its planes between secure and non-secure
+ * states, or vice versa) is allowed. When entering a secure state, only
+ * planes with a dir-translated fb_mode can be staged on the CRTC, only one
+ * CRTC may be active, and mixing secure and non-secure planes is not
+ * allowed.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
 int msm_atomic_check(struct drm_device *dev,
 			    struct drm_atomic_state *state)
 {
+	struct msm_drm_private *priv;
+
 	if (msm_is_suspend_blocked(dev)) {
 		DRM_DEBUG("rejecting commit during suspend\n");
 		return -EBUSY;
 	}
+
+	priv = dev->dev_private;
+	if (priv && priv->kms && priv->kms->funcs &&
+			priv->kms->funcs->atomic_check)
+		return priv->kms->funcs->atomic_check(priv->kms, state);
+
 	return drm_atomic_helper_check(dev, state);
 }
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 96ab883..5b8a6b8 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -662,13 +662,57 @@
 
 /* For SDE  display */
 struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
 		const char *name);
 
+/**
+ * msm_gem_add_obj_to_aspace_active_list: adds obj to active obj list in aspace
+ */
+void msm_gem_add_obj_to_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj);
+
+/**
+ * msm_gem_remove_obj_from_aspace_active_list: removes obj from the active
+ * obj list in aspace
+ */
+void msm_gem_remove_obj_from_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj);
+
+/**
+ * msm_gem_smmu_address_space_get: returns the aspace pointer for the requested
+ * domain
+ */
 struct msm_gem_address_space *
 msm_gem_smmu_address_space_get(struct drm_device *dev,
 		unsigned int domain);
 
+/**
+ * msm_gem_aspace_domain_attach_detach_update: function to inform clients of
+ * the attach/detach of the domain for this aspace
+ */
+void msm_gem_aspace_domain_attach_detach_update(
+		struct msm_gem_address_space *aspace,
+		bool is_detach);
+
+/**
+ * msm_gem_address_space_register_cb: function to register callback for attach
+ * and detach of the domain
+ */
+int msm_gem_address_space_register_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data);
+
+/**
+ * msm_gem_address_space_unregister_cb: function to unregister callback
+ */
+int msm_gem_address_space_unregister_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data);
+
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index a7d06d1..d64dcc6 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -311,6 +311,10 @@
 		if (iommu_present(&platform_bus_type)) {
 			msm_gem_unmap_vma(domain->aspace, domain,
 				msm_obj->sgt, get_dmabuf_ptr(obj));
+
+			msm_gem_remove_obj_from_aspace_active_list(
+					domain->aspace,
+					obj);
 		}
 
 		obj_remove_domain(domain);
@@ -390,10 +394,12 @@
 			msm_obj->flags);
 	}
 
-	if (!ret && domain)
+	if (!ret && domain) {
 		*iova = domain->iova;
-	else
+		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
+	} else {
 		obj_remove_domain(domain);
+	}
 
 	return ret;
 }
@@ -441,6 +447,63 @@
 	// things that are no longer needed..
 }
 
+void msm_gem_aspace_domain_attach_detach_update(
+		struct msm_gem_address_space *aspace,
+		bool is_detach)
+{
+	struct msm_gem_object *msm_obj;
+	struct drm_gem_object *obj;
+	struct aspace_client *aclient;
+	int ret;
+	uint32_t iova;
+
+	if (!aspace)
+		return;
+
+	mutex_lock(&aspace->dev->struct_mutex);
+	if (is_detach) {
+		/* Indicate to clients domain is getting detached */
+		list_for_each_entry(aclient, &aspace->clients, list) {
+			if (aclient->cb)
+				aclient->cb(aclient->cb_data,
+						is_detach);
+		}
+
+		/*
+		 * Unmap active buffers. Clients would normally do this when
+		 * the callback is invoked, but it still needs to be done here
+		 * for framebuffers that are not attached to any plane
+		 * (e.g. background apps).
+		 */
+		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
+			obj = &msm_obj->base;
+			if (obj->import_attach) {
+				put_iova(obj);
+				put_pages(obj);
+			}
+		}
+	} else {
+		/* map active buffers */
+		list_for_each_entry(msm_obj, &aspace->active_list,
+				iova_list) {
+			obj = &msm_obj->base;
+			ret = msm_gem_get_iova_locked(obj, aspace, &iova);
+			if (ret) {
+				mutex_unlock(&obj->dev->struct_mutex);
+				return;
+			}
+		}
+
+		/* Indicate to clients domain is attached */
+		list_for_each_entry(aclient, &aspace->clients, list) {
+			if (aclient->cb)
+				aclient->cb(aclient->cb_data,
+						is_detach);
+		}
+	}
+	mutex_unlock(&aspace->dev->struct_mutex);
+}
+
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args)
 {
@@ -869,6 +932,7 @@
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->domains);
+	INIT_LIST_HEAD(&msm_obj->iova_list);
 
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9d41a00..c50c453 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -25,6 +25,8 @@
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 #define MSM_BO_KEEPATTRS     0x20000000     /* keep h/w bus attributes */
 
+struct msm_gem_object;
+
 struct msm_gem_aspace_ops {
 	int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
 		struct sg_table *sgt, void *priv, unsigned int flags);
@@ -33,12 +35,35 @@
 		struct sg_table *sgt, void *priv);
 
 	void (*destroy)(struct msm_gem_address_space *);
+	void (*add_to_active)(struct msm_gem_address_space *,
+		struct msm_gem_object *);
+	void (*remove_from_active)(struct msm_gem_address_space *,
+		struct msm_gem_object *);
+	int (*register_cb)(struct msm_gem_address_space *,
+			void (*cb)(void *, bool),
+			void *);
+	int (*unregister_cb)(struct msm_gem_address_space *,
+			void (*cb)(void *, bool),
+			void *);
 };
 
+struct aspace_client {
+	void (*cb)(void *, bool);
+	void *cb_data;
+	struct list_head list;
+};
+
 struct msm_gem_address_space {
 	const char *name;
 	struct msm_mmu *mmu;
 	const struct msm_gem_aspace_ops *ops;
+	bool domain_attached;
+	struct drm_device *dev;
+	/* list of mapped objects */
+	struct list_head active_list;
+	/* list of clients */
+	struct list_head clients;
 };
 
 struct msm_gem_vma {
@@ -96,6 +121,7 @@
 	 * an IOMMU.  Also used for stolen/splashscreen buffer.
 	 */
 	struct drm_mm_node *vram_node;
+	struct list_head iova_list;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 8e56871..d02228a 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -44,6 +44,9 @@
 	struct dma_buf *buf = priv;
 	int ret;
 
+	if (!aspace || !aspace->domain_attached)
+		return -EINVAL;
+
 	if (buf)
 		ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
 			DMA_BIDIRECTIONAL, flags);
@@ -62,15 +65,109 @@
 	aspace->mmu->funcs->destroy(aspace->mmu);
 }
 
+static void smmu_aspace_add_to_active(
+		struct msm_gem_address_space *aspace,
+		struct msm_gem_object *msm_obj)
+{
+	WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
+	list_move_tail(&msm_obj->iova_list, &aspace->active_list);
+}
+
+static void smmu_aspace_remove_from_active(
+		struct msm_gem_address_space *aspace,
+		struct msm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj, *next;
+
+	WARN_ON(!mutex_is_locked(&aspace->dev->struct_mutex));
+
+	list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
+			iova_list) {
+		if (msm_obj == obj) {
+			list_del(&msm_obj->iova_list);
+			break;
+		}
+	}
+}
+
+static int smmu_aspace_register_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	struct aspace_client *aclient = NULL;
+	struct aspace_client *temp;
+
+	if (!aspace)
+		return -EINVAL;
+
+	if (!aspace->domain_attached)
+		return -EACCES;
+
+	aclient = kzalloc(sizeof(*aclient), GFP_KERNEL);
+	if (!aclient)
+		return -ENOMEM;
+
+	aclient->cb = cb;
+	aclient->cb_data = cb_data;
+	INIT_LIST_HEAD(&aclient->list);
+
+	/* check if callback is already registered */
+	mutex_lock(&aspace->dev->struct_mutex);
+	list_for_each_entry(temp, &aspace->clients, list) {
+		if ((temp->cb == aclient->cb) &&
+			(temp->cb_data == aclient->cb_data)) {
+			kfree(aclient);
+			mutex_unlock(&aspace->dev->struct_mutex);
+			return -EEXIST;
+		}
+	}
+
+	list_move_tail(&aclient->list, &aspace->clients);
+	mutex_unlock(&aspace->dev->struct_mutex);
+
+	return 0;
+}
+
+static int smmu_aspace_unregister_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	struct aspace_client *aclient = NULL;
+	int rc = -ENOENT;
+
+	if (!aspace || !cb)
+		return -EINVAL;
+
+	mutex_lock(&aspace->dev->struct_mutex);
+	list_for_each_entry(aclient, &aspace->clients, list) {
+		if ((aclient->cb == cb) &&
+			(aclient->cb_data == cb_data)) {
+			list_del(&aclient->list);
+			kfree(aclient);
+			rc = 0;
+			break;
+		}
+	}
+	mutex_unlock(&aspace->dev->struct_mutex);
+
+	return rc;
+}
+
 
 static const struct msm_gem_aspace_ops smmu_aspace_ops = {
 	.map = smmu_aspace_map_vma,
 	.unmap = smmu_aspace_unmap_vma,
-	.destroy = smmu_aspace_destroy
+	.destroy = smmu_aspace_destroy,
+	.add_to_active = smmu_aspace_add_to_active,
+	.remove_from_active = smmu_aspace_remove_from_active,
+	.register_cb = smmu_aspace_register_cb,
+	.unregister_cb = smmu_aspace_unregister_cb,
 };
 
 struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
 		const char *name)
 {
 	struct msm_gem_address_space *aspace;
@@ -82,9 +179,12 @@
 	if (!aspace)
 		return ERR_PTR(-ENOMEM);
 
+	aspace->dev = dev;
 	aspace->name = name;
 	aspace->mmu = mmu;
 	aspace->ops = &smmu_aspace_ops;
+	INIT_LIST_HEAD(&aspace->active_list);
+	INIT_LIST_HEAD(&aspace->clients);
 
 	return aspace;
 }
@@ -218,3 +318,44 @@
 
 	kfree(aspace);
 }
+
+void msm_gem_add_obj_to_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (aspace && aspace->ops && aspace->ops->add_to_active)
+		aspace->ops->add_to_active(aspace, msm_obj);
+}
+
+void msm_gem_remove_obj_from_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (aspace && aspace->ops && aspace->ops->remove_from_active)
+		aspace->ops->remove_from_active(aspace, msm_obj);
+}
+
+int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	if (aspace && aspace->ops && aspace->ops->register_cb)
+		return aspace->ops->register_cb(aspace, cb, cb_data);
+
+	return -EINVAL;
+}
+
+int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	if (aspace && aspace->ops && aspace->ops->unregister_cb)
+		return aspace->ops->unregister_cb(aspace, cb, cb_data);
+
+	return -EINVAL;
+}
+
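A minimal sketch of how a client might use the address-space callback API declared in msm_drv.h above. The callback and client pointer are hypothetical; only msm_gem_address_space_register_cb() and the is_detach convention from this patch are assumed.

#include "msm_drv.h"

/*
 * Illustrative only: the callback receives is_detach == true just before the
 * SMMU domain is detached, and false once it has been re-attached.
 */
static void example_aspace_cb(void *cb_data, bool is_detach)
{
	if (is_detach)
		pr_debug("aspace detaching, client %pK should unmap\n", cb_data);
	else
		pr_debug("aspace attached, client %pK may map again\n", cb_data);
}

static int example_track_aspace(struct msm_gem_address_space *aspace,
				void *client)
{
	int rc;

	rc = msm_gem_address_space_register_cb(aspace, example_aspace_cb,
					       client);
	/* -EEXIST means this cb/cb_data pair is already registered. */
	if (rc == -EEXIST)
		rc = 0;

	return rc;
}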
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 7692bef..0375979 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -75,6 +75,9 @@
 			const struct msm_format *msm_fmt,
 			const struct drm_mode_fb_cmd2 *cmd,
 			struct drm_gem_object **bos);
+	/* perform complete atomic check of given atomic state */
+	int (*atomic_check)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
 	/* misc: */
 	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
 			struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 5af26e2..08e6f79 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -45,6 +45,7 @@
 	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
 			struct dma_buf *dma_buf, int dir);
 	void (*destroy)(struct msm_mmu *mmu);
+	bool (*is_domain_secure)(struct msm_mmu *mmu);
 };
 
 struct msm_mmu {
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index f2996dd..d1991a4 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -21,8 +21,6 @@
 		uint32_t blob_count,
 		uint32_t state_size)
 {
-	int i;
-
 	/* prevent access if any of these are NULL */
 	if (!base || !dev || !property_array || !property_data) {
 		property_count = 0;
@@ -60,10 +58,6 @@
 				0,
 				sizeof(struct msm_property_data) *
 				property_count);
-		INIT_LIST_HEAD(&info->dirty_list);
-
-		for (i = 0; i < property_count; ++i)
-			INIT_LIST_HEAD(&property_data[i].dirty_node);
 	}
 }
 
@@ -72,9 +66,6 @@
 	if (!info)
 		return;
 
-	/* reset dirty list */
-	INIT_LIST_HEAD(&info->dirty_list);
-
 	/* free state cache */
 	while (info->state_cache_size > 0)
 		kfree(info->state_cache[--(info->state_cache_size)]);
@@ -82,24 +73,25 @@
 	mutex_destroy(&info->property_lock);
 }
 
-int msm_property_pop_dirty(struct msm_property_info *info)
+int msm_property_pop_dirty(struct msm_property_info *info,
+		struct msm_property_state *property_state)
 {
 	struct list_head *item;
 	int rc = 0;
 
-	if (!info) {
-		DRM_ERROR("invalid info\n");
+	if (!info || !property_state || !property_state->values) {
+		DRM_ERROR("invalid argument(s)\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&info->property_lock);
-	if (list_empty(&info->dirty_list)) {
+	if (list_empty(&property_state->dirty_list)) {
 		rc = -EAGAIN;
 	} else {
-		item = info->dirty_list.next;
+		item = property_state->dirty_list.next;
 		list_del_init(item);
-		rc = container_of(item, struct msm_property_data, dirty_node)
-			- info->property_data;
+		rc = container_of(item, struct msm_property_value, dirty_node)
+			- property_state->values;
 		DRM_DEBUG_KMS("property %d dirty\n", rc);
 	}
 	mutex_unlock(&info->property_lock);
@@ -112,26 +104,28 @@
  *                                   This function doesn't mutex protect the
  *                                   dirty linked list.
  * @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
  * @property_idx: Property index
  */
 static void _msm_property_set_dirty_no_lock(
 		struct msm_property_info *info,
+		struct msm_property_state *property_state,
 		uint32_t property_idx)
 {
-	if (!info || property_idx >= info->property_count) {
-		DRM_ERROR("invalid argument(s), info %pK, idx %u\n",
-				info, property_idx);
+	if (!info || !property_state || !property_state->values ||
+			property_idx >= info->property_count) {
+		DRM_ERROR("invalid argument(s), idx %u\n", property_idx);
 		return;
 	}
 
 	/* avoid re-inserting if already dirty */
-	if (!list_empty(&info->property_data[property_idx].dirty_node)) {
+	if (!list_empty(&property_state->values[property_idx].dirty_node)) {
 		DRM_DEBUG_KMS("property %u already dirty\n", property_idx);
 		return;
 	}
 
-	list_add_tail(&info->property_data[property_idx].dirty_node,
-			&info->dirty_list);
+	list_add_tail(&property_state->values[property_idx].dirty_node,
+			&property_state->dirty_list);
 }
 
 /**
@@ -371,35 +365,36 @@
 	return rc;
 }
 
-int msm_property_set_dirty(struct msm_property_info *info, int property_idx)
+int msm_property_set_dirty(struct msm_property_info *info,
+		struct msm_property_state *property_state,
+		int property_idx)
 {
-	if (!info) {
-		DRM_ERROR("invalid property info\n");
+	if (!info || !property_state || !property_state->values) {
+		DRM_ERROR("invalid argument(s)\n");
 		return -EINVAL;
 	}
 	mutex_lock(&info->property_lock);
-	_msm_property_set_dirty_no_lock(info, property_idx);
+	_msm_property_set_dirty_no_lock(info, property_state, property_idx);
 	mutex_unlock(&info->property_lock);
 	return 0;
 }
 
 int msm_property_atomic_set(struct msm_property_info *info,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs,
+		struct msm_property_state *property_state,
 		struct drm_property *property, uint64_t val)
 {
 	struct drm_property_blob *blob;
 	int property_idx, rc = -EINVAL;
 
 	property_idx = msm_property_index(info, property);
-	if (!info || (property_idx == -EINVAL) || !property_values) {
-		DRM_DEBUG("Invalid argument(s)\n");
+	if (!info || !property_state ||
+			(property_idx == -EINVAL) || !property_state->values) {
+		DRM_DEBUG("invalid argument(s)\n");
 	} else {
 		/* extra handling for incoming properties */
 		mutex_lock(&info->property_lock);
 		if ((property->flags & DRM_MODE_PROP_BLOB) &&
-			(property_idx < info->blob_count) &&
-			property_blobs) {
+			(property_idx < info->blob_count)) {
 			/* DRM lookup also takes a reference */
 			blob = drm_property_lookup_blob(info->dev,
 				(uint32_t)val);
@@ -411,18 +406,21 @@
 				val = blob->base.id;
 
 				/* save blob - need to clear previous ref */
-				if (property_blobs[property_idx])
+				if (property_state->values[property_idx].blob)
 					drm_property_unreference_blob(
-						property_blobs[property_idx]);
-				property_blobs[property_idx] = blob;
+						property_state->values[
+						property_idx].blob);
+				property_state->values[property_idx].blob =
+					blob;
 			}
 		}
 
 		/* update value and flag as dirty */
-		if (property_values[property_idx] != val ||
+		if (property_state->values[property_idx].value != val ||
 				info->property_data[property_idx].force_dirty) {
-			property_values[property_idx] = val;
-			_msm_property_set_dirty_no_lock(info, property_idx);
+			property_state->values[property_idx].value = val;
+			_msm_property_set_dirty_no_lock(info, property_state,
+					property_idx);
 
 			DBG("%s - %lld", property->name, val);
 		}
@@ -434,18 +432,18 @@
 }
 
 int msm_property_atomic_get(struct msm_property_info *info,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs,
+		struct msm_property_state *property_state,
 		struct drm_property *property, uint64_t *val)
 {
 	int property_idx, rc = -EINVAL;
 
 	property_idx = msm_property_index(info, property);
-	if (!info || (property_idx == -EINVAL) || !property_values || !val) {
+	if (!info || (property_idx == -EINVAL) ||
+			!property_state->values || !val) {
 		DRM_DEBUG("Invalid argument(s)\n");
 	} else {
 		mutex_lock(&info->property_lock);
-		*val = property_values[property_idx];
+		*val = property_state->values[property_idx].value;
 		mutex_unlock(&info->property_lock);
 		rc = 0;
 	}
@@ -495,8 +493,8 @@
 }
 
 void msm_property_reset_state(struct msm_property_info *info, void *state,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs)
+		struct msm_property_state *property_state,
+		struct msm_property_value *property_values)
 {
 	uint32_t i;
 
@@ -508,24 +506,29 @@
 	if (state)
 		memset(state, 0, info->state_size);
 
+	if (property_state) {
+		property_state->property_count = info->property_count;
+		property_state->values = property_values;
+		INIT_LIST_HEAD(&property_state->dirty_list);
+	}
+
 	/*
 	 * Assign default property values. This helper is mostly used
 	 * to initialize newly created state objects.
 	 */
 	if (property_values)
-		for (i = 0; i < info->property_count; ++i)
-			property_values[i] =
+		for (i = 0; i < info->property_count; ++i) {
+			property_values[i].value =
 				info->property_data[i].default_value;
-
-	if (property_blobs)
-		for (i = 0; i < info->blob_count; ++i)
-			property_blobs[i] = 0;
+			property_values[i].blob = NULL;
+			INIT_LIST_HEAD(&property_values[i].dirty_node);
+		}
 }
 
 void msm_property_duplicate_state(struct msm_property_info *info,
 		void *old_state, void *state,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs)
+		struct msm_property_state *property_state,
+		struct msm_property_value *property_values)
 {
 	uint32_t i;
 
@@ -536,17 +539,24 @@
 
 	memcpy(state, old_state, info->state_size);
 
-	if (property_blobs) {
-		/* add ref count for blobs */
-		for (i = 0; i < info->blob_count; ++i)
-			if (property_blobs[i])
-				drm_property_reference_blob(property_blobs[i]);
-	}
+	if (!property_state)
+		return;
+
+	INIT_LIST_HEAD(&property_state->dirty_list);
+	property_state->values = property_values;
+
+	if (property_state->values)
+		/* add ref count for blobs and initialize dirty nodes */
+		for (i = 0; i < info->property_count; ++i) {
+			if (property_state->values[i].blob)
+				drm_property_reference_blob(
+						property_state->values[i].blob);
+			INIT_LIST_HEAD(&property_state->values[i].dirty_node);
+		}
 }
 
 void msm_property_destroy_state(struct msm_property_info *info, void *state,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs)
+		struct msm_property_state *property_state)
 {
 	uint32_t i;
 
@@ -554,19 +564,21 @@
 		DRM_ERROR("invalid argument(s)\n");
 		return;
 	}
-	if (property_blobs) {
+	if (property_state && property_state->values) {
 		/* remove ref count for blobs */
-		for (i = 0; i < info->blob_count; ++i)
-			if (property_blobs[i])
+		for (i = 0; i < info->property_count; ++i)
+			if (property_state->values[i].blob) {
 				drm_property_unreference_blob(
-						property_blobs[i]);
+						property_state->values[i].blob);
+				property_state->values[i].blob = NULL;
+			}
 	}
 
 	_msm_property_free_state(info, state);
 }
 
 void *msm_property_get_blob(struct msm_property_info *info,
-		struct drm_property_blob **property_blobs,
+		struct msm_property_state *property_state,
 		size_t *byte_len,
 		uint32_t property_idx)
 {
@@ -574,10 +586,11 @@
 	size_t len = 0;
 	void *rc = 0;
 
-	if (!info || !property_blobs || (property_idx >= info->blob_count)) {
+	if (!info || !property_state || !property_state->values ||
+			(property_idx >= info->blob_count)) {
 		DRM_ERROR("invalid argument(s)\n");
 	} else {
-		blob = property_blobs[property_idx];
+		blob = property_state->values[property_idx].blob;
 		if (blob) {
 			len = blob->length;
 			rc = &blob->data;
@@ -636,14 +649,15 @@
 }
 
 int msm_property_set_property(struct msm_property_info *info,
-		uint64_t *property_values,
+		struct msm_property_state *property_state,
 		uint32_t property_idx,
 		uint64_t val)
 {
 	int rc = -EINVAL;
 
 	if (!info || (property_idx >= info->property_count) ||
-			property_idx < info->blob_count || !property_values) {
+			property_idx < info->blob_count ||
+			!property_state || !property_state->values) {
 		DRM_ERROR("invalid argument(s)\n");
 	} else {
 		struct drm_property *drm_prop;
@@ -651,8 +665,7 @@
 		mutex_lock(&info->property_lock);
 
 		/* update cached value */
-		if (property_values)
-			property_values[property_idx] = val;
+		property_state->values[property_idx].value = val;
 
 		/* update the new default value for immutables */
 		drm_prop = info->property_array[property_idx];
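The msm_prop refactor above moves dirty tracking from msm_property_info into a per-state msm_property_state. Below is a brief sketch of how a driver state object would carry the new structures; the state struct and property count are hypothetical, and only the helper signatures introduced in this patch are assumed.

#include "msm_prop.h"

#define EXAMPLE_PROP_COUNT 8	/* hypothetical property count */

/* Hypothetical driver state carrying the new per-state property tracking. */
struct example_state {
	struct msm_property_state property_state;
	struct msm_property_value property_values[EXAMPLE_PROP_COUNT];
};

static void example_reset(struct msm_property_info *info,
			  struct example_state *cstate)
{
	/* Seeds default values and initializes the per-state dirty list. */
	msm_property_reset_state(info, cstate, &cstate->property_state,
				 cstate->property_values);
}

static void example_drain_dirty(struct msm_property_info *info,
				struct example_state *cstate)
{
	int idx;

	/* Pops properties dirtied by msm_property_atomic_set() on this state. */
	while ((idx = msm_property_pop_dirty(info,
					     &cstate->property_state)) >= 0)
		pr_debug("property %d changed\n", idx);
}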
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
index e54c796..9a53e56 100644
--- a/drivers/gpu/drm/msm/msm_prop.h
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -22,17 +22,28 @@
  * struct msm_property_data - opaque structure for tracking per
  *                            drm-object per property stuff
  * @default_value: Default property value for this drm object
- * @dirty_node: Linked list node to track if property is dirty or not
  * @force_dirty: Always dirty property on incoming sets, rather than checking
  *               for modified values
  */
 struct msm_property_data {
 	uint64_t default_value;
-	struct list_head dirty_node;
 	bool force_dirty;
 };
 
 /**
+ * struct msm_property_value - opaque structure for tracking per
+ *                             drm-object, per-property value state
+ * @value: Current property value for this drm object
+ * @blob: Pointer to associated blob data, if available
+ * @dirty_node: Linked list node to track if property is dirty or not
+ */
+struct msm_property_value {
+	uint64_t value;
+	struct drm_property_blob *blob;
+	struct list_head dirty_node;
+};
+
+/**
  * struct msm_property_info: Structure for property/state helper functions
  * @base: Pointer to base drm object (plane/crtc/etc.)
  * @dev: Pointer to drm device object
@@ -43,8 +54,6 @@
  * @install_request: Total number of property 'install' requests
  * @install_count: Total number of successful 'install' requests
  * @recent_idx: Index of property most recently accessed by set/get
- * @dirty_list: List of all properties that have been 'atomic_set' but not
- *              yet cleared with 'msm_property_pop_dirty'
  * @is_active: Whether or not drm component properties are 'active'
  * @state_cache: Cache of local states, to prevent alloc/free thrashing
  * @state_size: Size of local state structures
@@ -64,7 +73,6 @@
 
 	int32_t recent_idx;
 
-	struct list_head dirty_list;
 	bool is_active;
 
 	void *state_cache[MSM_PROP_STATE_CACHE_SIZE];
@@ -74,6 +82,19 @@
 };
 
 /**
+ * struct msm_property_state - Structure for local property state information
+ * @property_count: Total number of properties
+ * @values: Pointer to array of msm_property_value objects
+ * @dirty_list: List of all properties that have been 'atomic_set' but not
+ *              yet cleared with 'msm_property_pop_dirty'
+ */
+struct msm_property_state {
+	uint32_t property_count;
+	struct msm_property_value *values;
+	struct list_head dirty_list;
+};
+
+/**
  * msm_property_get_default - query default value of a property
  * @info: Pointer to property info container struct
  * @property_idx: Property index
@@ -134,12 +155,14 @@
  * msm_property_pop_dirty - determine next dirty property and clear
  *                          its dirty flag
  * @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
  * Returns: Valid msm property index on success,
  *          -EAGAIN if no dirty properties are available
 *          Property indices returned from this function are similar
  *          to those returned by the msm_property_index function.
  */
-int msm_property_pop_dirty(struct msm_property_info *info);
+int msm_property_pop_dirty(struct msm_property_info *info,
+		struct msm_property_state *property_state);
 
 /**
  * msm_property_init - initialize property info structure
@@ -268,38 +291,37 @@
 /**
  * msm_property_set_dirty - forcibly flag a property as dirty
  * @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
  * @property_idx: Property index
  * Returns: Zero on success
  */
-int msm_property_set_dirty(struct msm_property_info *info, int property_idx);
+int msm_property_set_dirty(struct msm_property_info *info,
+		struct msm_property_state *property_state,
+		int property_idx);
 
 /**
  * msm_property_atomic_set - helper function for atomic property set callback
  * @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to local state structure
  * @property: Incoming property pointer
  * @val: Incoming property value
  * Returns: Zero on success
  */
 int msm_property_atomic_set(struct msm_property_info *info,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs,
+		struct msm_property_state *property_state,
 		struct drm_property *property,
 		uint64_t val);
 
 /**
  * msm_property_atomic_get - helper function for atomic property get callback
  * @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to local state structure
  * @property: Incoming property pointer
  * @val: Pointer to variable for receiving property value
  * Returns: Zero on success
  */
 int msm_property_atomic_get(struct msm_property_info *info,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs,
+		struct msm_property_state *property_state,
 		struct drm_property *property,
 		uint64_t *val);
 
@@ -313,50 +335,47 @@
  * msm_property_reset_state - helper function for state reset callback
  * @info: Pointer to property info container struct
  * @state: Pointer to local state structure
+ * @property_state: Pointer to property state container struct
  * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
  */
-void msm_property_reset_state(struct msm_property_info *info,
-		void *state,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs);
+void msm_property_reset_state(struct msm_property_info *info, void *state,
+		struct msm_property_state *property_state,
+		struct msm_property_value *property_values);
 
 /**
  * msm_property_duplicate_state - helper function for duplicate state cb
  * @info: Pointer to property info container struct
  * @old_state: Pointer to original state structure
  * @state: Pointer to newly created state structure
+ * @property_state: Pointer to destination property state container struct
  * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
  */
 void msm_property_duplicate_state(struct msm_property_info *info,
 		void *old_state,
 		void *state,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs);
+		struct msm_property_state *property_state,
+		struct msm_property_value *property_values);
 
 /**
  * msm_property_destroy_state - helper function for destroy state cb
  * @info: Pointer to property info container struct
  * @state: Pointer to local state structure
- * @property_values: Pointer to property values cache array
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to property state container struct
  */
 void msm_property_destroy_state(struct msm_property_info *info,
 		void *state,
-		uint64_t *property_values,
-		struct drm_property_blob **property_blobs);
+		struct msm_property_state *property_state);
 
 /**
  * msm_property_get_blob - obtain cached data pointer for drm blob property
  * @info: Pointer to property info container struct
- * @property_blobs: Pointer to property blobs cache array
+ * @property_state: Pointer to property state container struct
  * @byte_len: Optional pointer to variable for accepting blob size
  * @property_idx: Property index
  * Returns: Pointer to blob data
  */
 void *msm_property_get_blob(struct msm_property_info *info,
-		struct drm_property_blob **property_blobs,
+		struct msm_property_state *property_state,
 		size_t *byte_len,
 		uint32_t property_idx);
 
@@ -385,13 +404,13 @@
  * DRM_MODE_PROP_IMMUTABLE flag set.
  * Note: This function cannot be called on a blob.
  * @info: Pointer to property info container struct
- * @property_values: Pointer to property values cache array
+ * @property_state: Pointer to property state container struct
  * @property_idx: Property index
  * @val: value of the property to set
  * Returns: Zero on success
  */
 int msm_property_set_property(struct msm_property_info *info,
-		uint64_t *property_values,
+		struct msm_property_state *property_state,
 		uint32_t property_idx,
 		uint64_t val);
 
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 7d7f74a..730fc06 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -37,6 +37,7 @@
 	struct device *dev;
 	struct dma_iommu_mapping *mmu_mapping;
 	bool domain_attached;
+	bool secure;
 };
 
 struct msm_smmu {
@@ -275,6 +276,14 @@
 	msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
 }
 
+static bool msm_smmu_is_domain_secure(struct msm_mmu *mmu)
+{
+	struct msm_smmu *smmu = to_msm_smmu(mmu);
+	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+	return client->secure;
+}
+
 static const struct msm_mmu_funcs funcs = {
 	.attach = msm_smmu_attach,
 	.detach = msm_smmu_detach,
@@ -285,6 +294,7 @@
 	.map_dma_buf = msm_smmu_map_dma_buf,
 	.unmap_dma_buf = msm_smmu_unmap_dma_buf,
 	.destroy = msm_smmu_destroy,
+	.is_domain_secure = msm_smmu_is_domain_secure,
 };
 
 static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
@@ -458,6 +468,7 @@
 	if (domain->secure) {
 		int secure_vmid = VMID_CP_PIXEL;
 
+		client->secure = true;
 		rc = iommu_domain_set_attr(client->mmu_mapping->domain,
 				DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
 		if (rc) {
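
The msm_smmu.c hunk records whether a client's IOMMU domain was programmed with the secure VMID and exposes the flag through the mmu funcs table, so callers can filter address spaces by security. A hedged standalone sketch of that vtable query, with hypothetical simplified types:

/* Sketch of the vtable-style query added above; simplified standalone types. */
#include <stdbool.h>
#include <stdio.h>

struct mmu;

struct mmu_funcs {
	bool (*is_domain_secure)(struct mmu *mmu);
};

struct mmu {
	const struct mmu_funcs *funcs;
	bool secure;		/* set when the secure VMID is programmed */
};

static bool smmu_is_domain_secure(struct mmu *mmu)
{
	return mmu->secure;
}

static const struct mmu_funcs funcs = {
	.is_domain_secure = smmu_is_domain_secure,
};

int main(void)
{
	struct mmu secure_ctx = { .funcs = &funcs, .secure = true };
	struct mmu normal_ctx = { .funcs = &funcs, .secure = false };

	/* callers (e.g. attach/detach helpers) can filter on secure domains */
	printf("secure_ctx secure? %d\n",
	       secure_ctx.funcs->is_domain_secure(&secure_ctx));
	printf("normal_ctx secure? %d\n",
	       normal_ctx.funcs->is_domain_secure(&normal_ctx));
	return 0;
}
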
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index e87058e..f5e2ada 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -233,8 +233,8 @@
 	}
 
 	if (!c_conn->ops.get_dst_format) {
-		SDE_ERROR("get_dst_format is invalid\n");
-		return -EINVAL;
+		SDE_DEBUG("get_dst_format is unavailable\n");
+		return 0;
 	}
 
 	dst_format = c_conn->ops.get_dst_format(c_conn->display);
@@ -331,7 +331,7 @@
 
 	/* try to get user config data first */
 	*cfg = msm_property_get_blob(&c_conn->property_info,
-					c_state->property_blobs,
+					&c_state->property_state,
 					&dither_sz,
 					CONNECTOR_PROP_PP_DITHER);
 	/* if user config data doesn't exist, use default dither blob */
@@ -459,13 +459,12 @@
 	drm_framebuffer_unreference(c_state->out_fb);
 	c_state->out_fb = NULL;
 
-	if (c_conn) {
-		c_state->property_values[CONNECTOR_PROP_OUT_FB] =
+	if (c_conn)
+		c_state->property_values[CONNECTOR_PROP_OUT_FB].value =
 			msm_property_get_default(&c_conn->property_info,
 					CONNECTOR_PROP_OUT_FB);
-	} else {
-		c_state->property_values[CONNECTOR_PROP_OUT_FB] = ~0;
-	}
+	else
+		c_state->property_values[CONNECTOR_PROP_OUT_FB].value = ~0;
 }
 
 static void sde_connector_atomic_destroy_state(struct drm_connector *connector,
@@ -496,8 +495,7 @@
 	} else {
 		/* destroy value helper */
 		msm_property_destroy_state(&c_conn->property_info, c_state,
-				c_state->property_values,
-				c_state->property_blobs);
+				&c_state->property_state);
 	}
 }
 
@@ -526,7 +524,8 @@
 
 	/* reset value helper, zero out state structure and reset properties */
 	msm_property_reset_state(&c_conn->property_info, c_state,
-			c_state->property_values, c_state->property_blobs);
+			&c_state->property_state,
+			c_state->property_values);
 
 	c_state->base.connector = connector;
 	connector->state = &c_state->base;
@@ -554,8 +553,8 @@
 
 	/* duplicate value helper */
 	msm_property_duplicate_state(&c_conn->property_info,
-			c_oldstate, c_state, c_state->property_values,
-			c_state->property_blobs);
+			c_oldstate, c_state,
+			&c_state->property_state, c_state->property_values);
 
 	/* additional handling for drm framebuffer objects */
 	if (c_state->out_fb) {
@@ -755,8 +754,7 @@
 
 	/* generic property handling */
 	rc = msm_property_atomic_set(&c_conn->property_info,
-			c_state->property_values, c_state->property_blobs,
-			property, val);
+			&c_state->property_state, property, val);
 	if (rc)
 		goto end;
 
@@ -863,8 +861,7 @@
 	else
 		/* get cached property value */
 		rc = msm_property_atomic_get(&c_conn->property_info,
-				c_state->property_values,
-				c_state->property_blobs, property, val);
+				&c_state->property_state, property, val);
 
 	/* allow for custom override */
 	if (c_conn->ops.get_property)
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 8796c52..1b594cd 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -315,6 +315,7 @@
  * @base: Base drm connector structure
  * @out_fb: Pointer to output frame buffer, if applicable
  * @aspace: Address space for accessing frame buffer objects, if applicable
+ * @property_state: Local storage for msm_prop properties
  * @property_values: Local cache of current connector property values
  * @rois: Regions of interest structure for mapping CRTC to Connector output
  * @property_blobs: blob properties
@@ -323,7 +324,8 @@
 	struct drm_connector_state base;
 	struct drm_framebuffer *out_fb;
 	struct msm_gem_address_space *aspace;
-	uint64_t property_values[CONNECTOR_PROP_COUNT];
+	struct msm_property_state property_state;
+	struct msm_property_value property_values[CONNECTOR_PROP_COUNT];
 
 	struct msm_roi_list rois;
 	struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT];
@@ -346,15 +348,15 @@
  */
 #define sde_connector_get_property(S, X) \
 	((S) && ((X) < CONNECTOR_PROP_COUNT) ? \
-	 (to_sde_connector_state((S))->property_values[(X)]) : 0)
+	 (to_sde_connector_state((S))->property_values[(X)].value) : 0)
 
 /**
- * sde_connector_get_property_values - retrieve property values cache
+ * sde_connector_get_property_state - retrieve property state cache
  * @S: Pointer to drm connector state
- * Returns: Integer value of requested property
+ * Returns: Pointer to local property state structure
  */
-#define sde_connector_get_property_values(S) \
-	((S) ? (to_sde_connector_state((S))->property_values) : 0)
+#define sde_connector_get_property_state(S) \
+	((S) ? (&to_sde_connector_state((S))->property_state) : NULL)
 
 /**
  * sde_connector_get_out_fb - query out_fb value from sde connector state
@@ -379,6 +381,20 @@
 }
 
 /**
+ * sde_connector_get_lp - helper accessor to retrieve LP state
+ * @connector: pointer to drm connector
+ * Returns: value of the CONNECTOR_PROP_LP property or 0
+ */
+static inline uint64_t sde_connector_get_lp(
+		struct drm_connector *connector)
+{
+	if (!connector || !connector->state)
+		return 0;
+	return sde_connector_get_property(connector->state,
+			CONNECTOR_PROP_LP);
+}
+
+/**
  * sde_connector_init - create drm connector object for a given display
  * @dev: Pointer to drm device struct
  * @encoder: Pointer to associated encoder
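
sde_connector_get_lp() added above is a guarded accessor: it returns the cached CONNECTOR_PROP_LP value and falls back to 0 when the connector or its state is missing. A small standalone illustration of the same bounds- and NULL-checked accessor pattern, using made-up property indices:

/* Standalone illustration of the guarded accessor pattern used by the
 * sde_connector_get_property()/get_lp() helpers; simplified types only.
 */
#include <stdint.h>
#include <stdio.h>

#define PROP_COUNT 3
#define PROP_LP    2

struct conn_state {
	uint64_t values[PROP_COUNT];
};

/* return 0 for a NULL state or out-of-range index, the cached value otherwise */
#define conn_get_property(S, X) \
	(((S) && ((X) < PROP_COUNT)) ? (S)->values[(X)] : 0)

static uint64_t conn_get_lp(const struct conn_state *state)
{
	if (!state)
		return 0;
	return conn_get_property(state, PROP_LP);
}

int main(void)
{
	struct conn_state st = { .values = { 0, 0, 2 /* e.g. LP2 */ } };

	printf("lp = %llu\n", (unsigned long long)conn_get_lp(&st));
	printf("lp (no state) = %llu\n", (unsigned long long)conn_get_lp(NULL));
	return 0;
}
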
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index ff802e6..935dc12 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1472,7 +1472,7 @@
 			sde_crtc->event = NULL;
 			DRM_DEBUG_VBL("%s: send event: %pK\n",
 						sde_crtc->name, event);
-			SDE_EVT32(DRMID(crtc));
+			SDE_EVT32_VERBOSE(DRMID(crtc));
 			drm_crtc_send_vblank_event(crtc, event);
 		}
 	}
@@ -1899,7 +1899,7 @@
 		cstate->lm_bounds[i].h = adj_mode->vdisplay;
 		memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i],
 				sizeof(cstate->lm_roi[i]));
-		SDE_EVT32(DRMID(crtc), i,
+		SDE_EVT32_VERBOSE(DRMID(crtc), i,
 				cstate->lm_bounds[i].x, cstate->lm_bounds[i].y,
 				cstate->lm_bounds[i].w, cstate->lm_bounds[i].h);
 		SDE_DEBUG("%s: lm%d bnd&roi (%d,%d,%d,%d)\n", sde_crtc->name, i,
@@ -2084,7 +2084,7 @@
 
 	/* destroy value helper */
 	msm_property_destroy_state(&sde_crtc->property_info, cstate,
-			cstate->property_values, cstate->property_blobs);
+			&cstate->property_state);
 }
 
 static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
@@ -2103,7 +2103,7 @@
 		return 0;
 	}
 
-	SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
+	SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
 	ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp,
 			msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT));
 	if (!ret) {
@@ -2112,7 +2112,7 @@
 		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FATAL);
 		rc = -ETIMEDOUT;
 	}
-	SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
+	SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
 
 	return rc;
 }
@@ -2344,7 +2344,7 @@
 	/* duplicate value helper */
 	msm_property_duplicate_state(&sde_crtc->property_info,
 			old_cstate, cstate,
-			cstate->property_values, cstate->property_blobs);
+			&cstate->property_state, cstate->property_values);
 
 	/* duplicate base helper */
 	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
@@ -2389,7 +2389,8 @@
 
 	/* reset value helper */
 	msm_property_reset_state(&sde_crtc->property_info, cstate,
-			cstate->property_values, cstate->property_blobs);
+			&cstate->property_state,
+			cstate->property_values);
 
 	_sde_crtc_set_input_fence_timeout(cstate);
 
@@ -2710,6 +2711,130 @@
 	return rc;
 }
 
+static int _sde_crtc_find_plane_fb_modes(struct drm_crtc_state *state,
+		uint32_t *fb_ns,
+		uint32_t *fb_sec,
+		uint32_t *fb_ns_dir,
+		uint32_t *fb_sec_dir)
+{
+	struct drm_plane *plane;
+	const struct drm_plane_state *pstate;
+	struct sde_plane_state *sde_pstate;
+	uint32_t mode = 0;
+	int rc;
+
+	if (!state) {
+		SDE_ERROR("invalid state\n");
+		return -EINVAL;
+	}
+
+	*fb_ns = 0;
+	*fb_sec = 0;
+	*fb_ns_dir = 0;
+	*fb_sec_dir = 0;
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+		if (IS_ERR_OR_NULL(pstate)) {
+			rc = PTR_ERR(pstate);
+			SDE_ERROR("crtc%d failed to get plane%d state%d\n",
+					state->crtc->base.id,
+					plane->base.id, rc);
+			return rc;
+		}
+		sde_pstate = to_sde_plane_state(pstate);
+		mode = sde_plane_get_property(sde_pstate,
+				PLANE_PROP_FB_TRANSLATION_MODE);
+		switch (mode) {
+		case SDE_DRM_FB_NON_SEC:
+			(*fb_ns)++;
+			break;
+		case SDE_DRM_FB_SEC:
+			(*fb_sec)++;
+			break;
+		case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+			(*fb_ns_dir)++;
+			break;
+		case SDE_DRM_FB_SEC_DIR_TRANS:
+			(*fb_sec_dir)++;
+			break;
+		default:
+			SDE_ERROR("plane%d: invalid fb_trans_mode %d\n",
+					plane->base.id,
+					mode);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct drm_encoder *encoder;
+	struct sde_crtc_state *cstate;
+	uint32_t secure;
+	uint32_t fb_ns = 0, fb_sec = 0, fb_ns_dir = 0, fb_sec_dir = 0;
+	int encoder_cnt = 0;
+	int rc;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	cstate = to_sde_crtc_state(state);
+
+	secure = sde_crtc_get_property(cstate,
+			CRTC_PROP_SECURITY_LEVEL);
+
+	rc = _sde_crtc_find_plane_fb_modes(state,
+			&fb_ns,
+			&fb_sec,
+			&fb_ns_dir,
+			&fb_sec_dir);
+	if (rc)
+		return rc;
+
+	/*
+	 * Validate the plane fb modes:
+	 * fb_ns_dir is for the secure display use case,
+	 * fb_sec_dir is for the secure camera preview use case,
+	 * fb_sec is for secure video playback,
+	 * fb_ns is for normal non-secure use cases.
+	 */
+	if (((secure == SDE_DRM_SEC_ONLY) &&
+				(fb_ns || fb_sec || fb_sec_dir)) ||
+			(fb_sec || fb_sec_dir)) {
+		SDE_ERROR(
+			"crtc%d: invalid plane fb_modes Sec:%d, NS:%d, Sec_Dir:%d, NS_Dir:%d\n",
+				crtc->base.id,
+				fb_sec, fb_ns, fb_sec_dir,
+				fb_ns_dir);
+		return -EINVAL;
+	}
+
+	/*
+	 * A secure crtc is not allowed in a topology shared
+	 * across different encoders.
+	 */
+	if (fb_ns_dir || fb_sec_dir) {
+		drm_for_each_encoder(encoder, crtc->dev)
+			if (encoder->crtc == crtc)
+				encoder_cnt++;
+
+		if (encoder_cnt >
+			MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC) {
+			SDE_ERROR(
+				"crtc%d, invalid virtual encoder count:%d\n",
+				crtc->base.id,
+				encoder_cnt);
+			return -EINVAL;
+		}
+	}
+	SDE_DEBUG("crtc:%d Secure validation successful\n", crtc->base.id);
+	return 0;
+}
+
 static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -2756,6 +2881,10 @@
 	_sde_crtc_setup_is_ppsplit(state);
 	_sde_crtc_setup_lm_bounds(crtc, state);
 
+	rc = _sde_crtc_check_secure_state(crtc, state);
+	if (rc)
+		return rc;
+
 	 /* get plane state for all drm planes associated with crtc state */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
 		if (IS_ERR_OR_NULL(pstate)) {
@@ -3196,8 +3325,7 @@
 		sde_crtc = to_sde_crtc(crtc);
 		cstate = to_sde_crtc_state(state);
 		ret = msm_property_atomic_set(&sde_crtc->property_info,
-				cstate->property_values, cstate->property_blobs,
-				property, val);
+				&cstate->property_state, property, val);
 		if (!ret) {
 			idx = msm_property_index(&sde_crtc->property_info,
 					property);
@@ -3317,8 +3445,8 @@
 				SDE_ERROR("fence create failed\n");
 		} else {
 			ret = msm_property_atomic_get(&sde_crtc->property_info,
-					cstate->property_values,
-					cstate->property_blobs, property, val);
+					&cstate->property_state,
+					property, val);
 			if (ret)
 				ret = sde_cp_crtc_get_property(crtc,
 					property, val);
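
The new crtc checks above tally each staged plane's framebuffer translation mode and reject combinations that are not allowed for the requested security level. A simplified standalone sketch of that tally-and-validate flow; the enum names are stand-ins and the rejection rule mirrors the check in this hunk:

/* Sketch of the plane fb-mode tally performed by the new crtc check. */
#include <stdio.h>

enum fb_mode { FB_NON_SEC, FB_SEC, FB_NON_SEC_DIR, FB_SEC_DIR };
enum sec_level { SEC_NON_SEC, SEC_ONLY };

static int check_planes(const enum fb_mode *modes, int n, enum sec_level secure)
{
	int fb_ns = 0, fb_sec = 0, fb_ns_dir = 0, fb_sec_dir = 0, i;

	for (i = 0; i < n; i++) {
		switch (modes[i]) {
		case FB_NON_SEC:      fb_ns++;      break;
		case FB_SEC:          fb_sec++;     break;
		case FB_NON_SEC_DIR:  fb_ns_dir++;  break;
		case FB_SEC_DIR:      fb_sec_dir++; break;
		}
	}

	/* mirror the hunk's rule: SEC_ONLY may not mix modes, and the
	 * secure (fb_sec/fb_sec_dir) modes are rejected outright here
	 */
	if ((secure == SEC_ONLY && (fb_ns || fb_sec || fb_sec_dir)) ||
	    (fb_sec || fb_sec_dir))
		return -1;

	return 0;
}

int main(void)
{
	enum fb_mode ok[]  = { FB_NON_SEC_DIR, FB_NON_SEC_DIR };
	enum fb_mode bad[] = { FB_NON_SEC, FB_SEC };

	printf("secure-only, dir planes: %s\n",
	       check_planes(ok, 2, SEC_ONLY) ? "reject" : "allow");
	printf("mixed ns + sec planes:   %s\n",
	       check_planes(bad, 2, SEC_NON_SEC) ? "reject" : "allow");
	return 0;
}
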
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 86b7855..439aeac 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -269,9 +269,9 @@
  * @lm_roi        : Current LM ROI, possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @user_roi_list : List of user's requested ROIs as from set property
+ * @property_state: Local storage for msm_prop properties
  * @property_values: Current crtc property values
  * @input_fence_timeout_ns : Cached input fence timeout, in ns
- * @property_blobs: Reference pointers for blob properties
  * @num_dim_layers: Number of dim layers
  * @dim_layer: Dim layer configs
  * @new_perf: new performance state being requested
@@ -296,9 +296,9 @@
 	struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
 	struct msm_roi_list user_roi_list;
 
-	uint64_t property_values[CRTC_PROP_COUNT];
+	struct msm_property_state property_state;
+	struct msm_property_value property_values[CRTC_PROP_COUNT];
 	uint64_t input_fence_timeout_ns;
-	struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
 	uint32_t num_dim_layers;
 	struct sde_hw_dim_layer dim_layer[SDE_MAX_DIM_LAYERS];
 
@@ -320,7 +320,7 @@
  * Returns: Integer value of requested property
  */
 #define sde_crtc_get_property(S, X) \
-	((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+	((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)].value) : 0)
 
 static inline int sde_crtc_mixer_width(struct sde_crtc *sde_crtc,
 	struct drm_display_mode *mode)
@@ -492,4 +492,21 @@
 void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
 		const struct sde_rect **crtc_roi);
 
+/**
+ * sde_crtc_get_secure_level - retrieve the secure level from the given state
+ *	object; used to determine the secure state of the crtc
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to drm crtc state
+ * Return: secure_level, or -EINVAL on invalid arguments
+ */
+static inline int sde_crtc_get_secure_level(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	if (!crtc || !state)
+		return -EINVAL;
+
+	return sde_crtc_get_property(to_sde_crtc_state(state),
+			CRTC_PROP_SECURITY_LEVEL);
+}
+
 #endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 0e94085..d41ddec 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -72,6 +72,7 @@
 #define MISR_BUFF_SIZE			256
 
 #define IDLE_TIMEOUT	64
+#define IDLE_SHORT_TIMEOUT	1
 
 /**
  * enum sde_enc_rc_events - events for resource control state machine
@@ -334,9 +335,9 @@
 
 	SDE_DEBUG_PHYS(phys_enc, "pending_cnt %d\n",
 			atomic_read(wait_info->atomic_cnt));
-	SDE_EVT32(DRMID(phys_enc->parent), irq->hw_idx,
-			atomic_read(wait_info->atomic_cnt),
-			SDE_EVTLOG_FUNC_ENTRY);
+	SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+		atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_ENTRY);
 
 	ret = sde_encoder_helper_wait_event_timeout(
 			DRMID(phys_enc->parent),
@@ -349,9 +350,10 @@
 		if (irq_status) {
 			unsigned long flags;
 
-			SDE_EVT32(DRMID(phys_enc->parent),
-					irq->hw_idx,
-					atomic_read(wait_info->atomic_cnt));
+			SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
+				irq->hw_idx, irq->irq_idx,
+				phys_enc->hw_pp->idx - PINGPONG_0,
+				atomic_read(wait_info->atomic_cnt));
 			SDE_DEBUG_PHYS(phys_enc,
 					"done but irq %d not triggered\n",
 					irq->irq_idx);
@@ -361,13 +363,22 @@
 			ret = 0;
 		} else {
 			ret = -ETIMEDOUT;
+			SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
+				irq->hw_idx, irq->irq_idx,
+				phys_enc->hw_pp->idx - PINGPONG_0,
+				atomic_read(wait_info->atomic_cnt), irq_status,
+				SDE_EVTLOG_ERROR);
 		}
 	} else {
 		ret = 0;
+		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+			irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+			atomic_read(wait_info->atomic_cnt));
 	}
 
-	SDE_EVT32(DRMID(phys_enc->parent), irq->hw_idx, ret,
-			SDE_EVTLOG_FUNC_EXIT);
+	SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		irq->irq_idx, ret, phys_enc->hw_pp->idx - PINGPONG_0,
+		atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_EXIT);
 
 	return ret;
 }
@@ -1335,6 +1346,8 @@
 		u32 sw_event)
 {
 	bool schedule_off = false;
+	bool autorefresh_enabled = false;
+	unsigned int lp, idle_timeout;
 	struct sde_encoder_virt *sde_enc;
 	struct msm_drm_private *priv;
 	struct msm_drm_thread *disp_thread;
@@ -1364,7 +1377,7 @@
 
 	SDE_DEBUG_ENC(sde_enc, "sw_event:%d, idle_pc_supported:%d\n", sw_event,
 			sde_enc->idle_pc_supported);
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
+	SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
 			sde_enc->rc_state, SDE_EVTLOG_FUNC_ENTRY);
 
 	switch (sw_event) {
@@ -1417,13 +1430,33 @@
 			return 0;
 		}
 
-		/* schedule delayed off work */
-		kthread_queue_delayed_work(
+		/* schedule delayed off work if autorefresh is disabled */
+		if (sde_enc->cur_master &&
+			sde_enc->cur_master->ops.is_autorefresh_enabled)
+			autorefresh_enabled =
+				sde_enc->cur_master->ops.is_autorefresh_enabled(
+							sde_enc->cur_master);
+
+		/* set idle timeout based on master connector's lp value */
+		if (sde_enc->cur_master)
+			lp = sde_connector_get_lp(
+					sde_enc->cur_master->connector);
+		else
+			lp = SDE_MODE_DPMS_ON;
+
+		if (lp == SDE_MODE_DPMS_LP2)
+			idle_timeout = IDLE_SHORT_TIMEOUT;
+		else
+			idle_timeout = IDLE_TIMEOUT;
+
+		if (!autorefresh_enabled)
+			kthread_queue_delayed_work(
 				&disp_thread->worker,
 				&sde_enc->delayed_off_work,
-				msecs_to_jiffies(IDLE_TIMEOUT));
+				msecs_to_jiffies(idle_timeout));
 		SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
-				SDE_EVTLOG_FUNC_CASE2);
+				autorefresh_enabled,
+				idle_timeout, SDE_EVTLOG_FUNC_CASE2);
 		SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
 				sw_event);
 		break;
@@ -1549,11 +1582,12 @@
 		break;
 
 	default:
+		SDE_EVT32(DRMID(drm_enc), sw_event, SDE_EVTLOG_ERROR);
 		SDE_ERROR("unexpected sw_event: %d\n", sw_event);
 		break;
 	}
 
-	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
+	SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event, sde_enc->idle_pc_supported,
 			sde_enc->rc_state, SDE_EVTLOG_FUNC_EXIT);
 	return 0;
 }
@@ -2096,7 +2130,7 @@
 				atomic_read(info->atomic_cnt) == 0, jiffies);
 		time = ktime_to_ms(ktime_get());
 
-		SDE_EVT32(drm_id, hw_id, rc, time, expected_time,
+		SDE_EVT32_VERBOSE(drm_id, hw_id, rc, time, expected_time,
 				atomic_read(info->atomic_cnt));
 	/* If we timed out, counter is valid and time is less, wait again */
 	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
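
The resource-control change above queues the delayed off work only when autorefresh is disabled, and shortens the idle timeout when the master connector reports LP2. A compact standalone sketch of that decision, with hypothetical constants standing in for the driver's defines:

/* Sketch of the delayed-off scheduling decision: skip the work entirely when
 * autorefresh is on, and shorten the timeout in LP2. Names are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define IDLE_TIMEOUT_MS       64
#define IDLE_SHORT_TIMEOUT_MS  1

enum dpms_mode { DPMS_ON, DPMS_LP1, DPMS_LP2, DPMS_OFF };

/* returns the delay in ms, or -1 when no work should be queued */
static int idle_off_delay_ms(bool autorefresh_enabled, enum dpms_mode lp)
{
	if (autorefresh_enabled)
		return -1;

	return (lp == DPMS_LP2) ? IDLE_SHORT_TIMEOUT_MS : IDLE_TIMEOUT_MS;
}

int main(void)
{
	printf("autorefresh on:   %d\n", idle_off_delay_ms(true, DPMS_ON));
	printf("normal, DPMS_ON:  %d\n", idle_off_delay_ms(false, DPMS_ON));
	printf("normal, DPMS_LP2: %d\n", idle_off_delay_ms(false, DPMS_LP2));
	return 0;
}
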
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index c1a40f5..7170d55 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -125,6 +125,8 @@
  * @irq_control:		Handler to enable/disable all the encoder IRQs
  * @update_split_role:		Update the split role of the phys enc
  * @restore:			Restore all the encoder configs.
+ * @is_autorefresh_enabled:	returns the current autorefresh
+ *                              enable/disable state.
  */
 
 struct sde_encoder_phys_ops {
@@ -164,6 +166,7 @@
 	void (*update_split_role)(struct sde_encoder_phys *phys_enc,
 			enum sde_enc_split_role role);
 	void (*restore)(struct sde_encoder_phys *phys);
+	bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
 };
 
 /**
@@ -312,6 +315,7 @@
  * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
  * @pending_rd_ptr_cnt: atomic counter to indicate if retire fence can be
  *                      signaled at the next rd_ptr_irq
+ * @rd_ptr_timestamp: last rd_ptr_irq timestamp
  * @autorefresh: autorefresh feature state
  */
 struct sde_encoder_phys_cmd {
@@ -321,6 +325,7 @@
 	int pp_timeout_report_cnt;
 	struct sde_encoder_phys_cmd_autorefresh autorefresh;
 	atomic_t pending_rd_ptr_cnt;
+	ktime_t rd_ptr_timestamp;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 2a46636..ad00a7f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -47,6 +47,12 @@
 
 #define SDE_ENC_WR_PTR_START_TIMEOUT_US 20000
 
+/*
+ * Threshold for signalling retire fences in cases where
+ * CTL_START_IRQ is received just after RD_PTR_IRQ
+ */
+#define SDE_ENC_CTL_START_THRESHOLD_US 500
+
 static inline int _sde_encoder_phys_cmd_get_idle_timeout(
 		struct sde_encoder_phys_cmd *cmd_enc)
 {
@@ -212,7 +218,7 @@
 {
 	struct sde_encoder_phys *phys_enc = arg;
 	struct sde_encoder_phys_cmd *cmd_enc;
-	bool signal_fence = false;
+	u32 event = 0;
 
 	if (!phys_enc || !phys_enc->hw_pp)
 		return;
@@ -221,48 +227,29 @@
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 
 	/**
-	 * signal only for master,
-	 * - when the ctl_start irq is done and incremented
-	 *   the pending_rd_ptr_cnt.
-	 * - when ctl_start irq status bit is set. This handles the case
-	 *   where ctl_start status bit is set in hardware, but the interrupt
-	 *   is delayed due to some reason.
+	 * signal only for the master, and only after the ctl_start
+	 * irq has incremented the pending_rd_ptr_cnt.
 	 */
-	if (sde_encoder_phys_cmd_is_master(phys_enc) &&
-			atomic_read(&phys_enc->pending_retire_fence_cnt)) {
+	if (sde_encoder_phys_cmd_is_master(phys_enc)
+		    && atomic_add_unless(&cmd_enc->pending_rd_ptr_cnt, -1, 0)
+		    && atomic_add_unless(
+				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
 
-		if (atomic_add_unless(
-				&cmd_enc->pending_rd_ptr_cnt, -1, 0)) {
-			signal_fence = true;
-		} else {
-			signal_fence =
-				sde_core_irq_read_nolock(phys_enc->sde_kms,
-				    phys_enc->irq[INTR_IDX_CTL_START].irq_idx,
-				    false);
-			if (signal_fence)
-				SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-				    phys_enc->hw_pp->idx - PINGPONG_0,
-				    atomic_read(
-					&phys_enc->pending_retire_fence_cnt),
-				    SDE_EVTLOG_FUNC_CASE1);
-		}
-
-		if (signal_fence && phys_enc->parent_ops.handle_frame_done) {
-			atomic_add_unless(
-				&phys_enc->pending_retire_fence_cnt, -1, 0);
+		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+		if (phys_enc->parent_ops.handle_frame_done)
 			phys_enc->parent_ops.handle_frame_done(
-				phys_enc->parent, phys_enc,
-				SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
-		}
+				phys_enc->parent, phys_enc, event);
 	}
 
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0, signal_fence, 0xfff);
+			phys_enc->hw_pp->idx - PINGPONG_0, event, 0xfff);
 
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 			phys_enc);
 
+	cmd_enc->rd_ptr_timestamp = ktime_get();
+
 	SDE_ATRACE_END("rd_ptr_irq");
 }
 
@@ -271,6 +258,8 @@
 	struct sde_encoder_phys *phys_enc = arg;
 	struct sde_encoder_phys_cmd *cmd_enc;
 	struct sde_hw_ctl *ctl;
+	u32 event = 0;
+	s64 time_diff_us;
 
 	if (!phys_enc || !phys_enc->hw_ctl)
 		return;
@@ -279,16 +268,41 @@
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 
 	ctl = phys_enc->hw_ctl;
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
 	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
 
-	/*
-	 * this is required for the fence signalling to be done in rd_ptr_irq
-	 * after ctrl_start_irq
-	 */
+	time_diff_us = ktime_us_delta(ktime_get(), cmd_enc->rd_ptr_timestamp);
+
+	/* handle retire fence based on only master */
 	if (sde_encoder_phys_cmd_is_master(phys_enc)
-			&& atomic_read(&phys_enc->pending_retire_fence_cnt))
-		atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
+			&& atomic_read(&phys_enc->pending_retire_fence_cnt)) {
+		/*
+		 * Handle the rare case where the ctl_start irq is received
+		 * after the rd_ptr irq. If it falls within the threshold, the
+		 * frame is guaranteed to be picked up by the current TE, so
+		 * signal the retire fence immediately.
+		 */
+		if ((time_diff_us <= SDE_ENC_CTL_START_THRESHOLD_US)
+			    && atomic_add_unless(
+				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
+
+			event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
+
+			if (phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc, event);
+
+		/*
+		 * In the normal case, the ctl_start irq arrives before the
+		 * rd_ptr irq, so set the atomic flag here and let rd_ptr_irq
+		 * handle signalling the retire fence.
+		 */
+		} else {
+			atomic_inc(&cmd_enc->pending_rd_ptr_cnt);
+		}
+	}
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0,
+				time_diff_us, event, 0xfff);
 
 	/* Signal any waiting ctl start interrupt */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
@@ -467,7 +481,8 @@
 			phys_enc->hw_pp->idx - PINGPONG_0,
 			info.rd_ptr_line_count,
 			info.wr_ptr_line_count);
-	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+	SDE_EVT32_VERBOSE(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
 			info.wr_ptr_line_count);
 
 	ret = hw_pp->ops.poll_timeout_wr_ptr(hw_pp, timeout_us);
@@ -707,9 +722,6 @@
 
 	tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
 
-	/* enable external TE after kickoff to avoid premature autorefresh */
-	tc_cfg.hw_vsync_mode = 0;
-
 	/*
 	 * By setting sync_cfg_height to near max register value, we essentially
 	 * disable sde hw generated TE signal, since hw TE will arrive first.
@@ -721,6 +733,7 @@
 	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
 	tc_cfg.start_pos = mode->vdisplay;
 	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+	tc_cfg.hw_vsync_mode = true;
 
 	SDE_DEBUG_CMDENC(cmd_enc,
 		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
@@ -825,7 +838,7 @@
 	phys_enc->enable_state = SDE_ENC_ENABLED;
 }
 
-static bool _sde_encoder_phys_cmd_is_autorefresh_enabled(
+static bool sde_encoder_phys_cmd_is_autorefresh_enabled(
 		struct sde_encoder_phys *phys_enc)
 {
 	struct sde_hw_pingpong *hw_pp;
@@ -849,17 +862,6 @@
 	return cfg.enable;
 }
 
-static void _sde_encoder_phys_cmd_connect_te(
-		struct sde_encoder_phys *phys_enc, bool enable)
-{
-	if (!phys_enc || !phys_enc->hw_pp ||
-			!phys_enc->hw_pp->ops.connect_external_te)
-		return;
-
-	SDE_EVT32(DRMID(phys_enc->parent), enable);
-	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
-}
-
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
@@ -1083,86 +1085,51 @@
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
+	unsigned long lock_flags;
 
 	if (!phys_enc)
 		return;
 
-	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
-		unsigned long lock_flags;
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		return;
 
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+			cmd_enc->autorefresh.cfg.enable);
 
-		SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
-				cmd_enc->autorefresh.cfg.enable);
-
-		if (!_sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc))
-			return;
-
-		/**
-		 * Autorefresh must be disabled carefully:
-		 *  - Must disable while there is no ongoing transmission
-		 *  - Receiving a TE will trigger the next Autorefresh TX
-		 *  - Only safe to disable Autorefresh between PPDone and TE
-		 *  - However, that is a small time window
-		 *  - Disabling External TE gives large safe window, assuming
-		 *    internally generated TE is set to a large counter value
-		 *
-		 * If Autorefresh is active:
-		 * 1. Disable external TE
-		 *   - TE will run on an SDE counter set to large value (~200ms)
-		 *
-		 * 2. Check for ongoing TX
-		 *   - If ongoing TX, set pending_kickoff_cnt if not set already
-		 *   - We don't want to wait for a ppdone that will never
-		 *     arrive, so verify ongoing TX
-		 *
-		 * 3. Wait for TX to Complete
-		 *  - Wait for PPDone pending count to reach 0
-		 *
-		 * 4. Leave Autorefresh Disabled
-		 *   - Assume disable of Autorefresh since it is now safe
-		 *   - Can now safely Disable Encoder, do debug printing, etc.
-		 *     without worrying that Autorefresh will kickoff
-		 */
-
-		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-
-		/* disable external TE to prevent next autorefresh */
-		_sde_encoder_phys_cmd_connect_te(phys_enc, false);
-
-		/* verify that we disabled TE during outstanding TX */
-		if (_sde_encoder_phys_cmd_is_ongoing_pptx(phys_enc))
-			atomic_add_unless(&phys_enc->pending_kickoff_cnt, 1, 1);
-
-		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
-		/* wait for ppdone if necessary due to catching ongoing TX */
-		if (_sde_encoder_phys_cmd_wait_for_idle(phys_enc))
-			SDE_ERROR_CMDENC(cmd_enc,
-					"pp:%d kickoff timed out\n",
-					phys_enc->hw_pp->idx - PINGPONG_0);
-
-		/*
-		 * not strictly necessary for kickoff, but simplifies disable
-		 * callflow since our disable is split across multiple phys_encs
-		 */
-		_sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0);
-
-		SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh & ext TE\n");
-
-	}
-}
-
-static void sde_encoder_phys_cmd_handle_post_kickoff(
-		struct sde_encoder_phys *phys_enc)
-{
-	if (!phys_enc)
+	if (!sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc))
 		return;
 
 	/**
-	 * re-enable external TE, either for the first time after enabling
-	 * or if disabled for Autorefresh
+	 * Autorefresh must be disabled carefully:
+	 *  - On targets prior to sdm845, autorefresh must be disabled
+	 *    between the pp_done and TE signals. Targets after sdm845
+	 *    support disabling autorefresh without turning off the
+	 *    hardware TE or waiting for pp_done.
+	 *
+	 *  - Wait for TX to complete
+	 *    Waiting for PPDone confirms the last frame transfer is done.
+	 *
+	 *  - Leave autorefresh disabled
+	 *    - It is now safe to assume autorefresh stays off
+	 *    - The encoder can now be disabled, debug prints issued, etc.
+	 *      without worrying that autorefresh will kick off a frame
 	 */
-	_sde_encoder_phys_cmd_connect_te(phys_enc, true);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
+	_sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0);
+
+	/* check for outstanding TX */
+	if (_sde_encoder_phys_cmd_is_ongoing_pptx(phys_enc))
+		atomic_add_unless(&phys_enc->pending_kickoff_cnt, 1, 1);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/* wait for ppdone if necessary due to catching ongoing TX */
+	if (_sde_encoder_phys_cmd_wait_for_idle(phys_enc))
+		SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n",
+				phys_enc->hw_pp->idx - PINGPONG_0);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh\n");
 }
 
 static void sde_encoder_phys_cmd_trigger_start(
@@ -1200,13 +1167,14 @@
 	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
 	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
 	ops->wait_for_tx_complete = sde_encoder_phys_cmd_wait_for_tx_complete;
-	ops->handle_post_kickoff = sde_encoder_phys_cmd_handle_post_kickoff;
 	ops->trigger_start = sde_encoder_phys_cmd_trigger_start;
 	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
 	ops->irq_control = sde_encoder_phys_cmd_irq_control;
 	ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
 	ops->restore = sde_encoder_phys_cmd_enable_helper;
+	ops->is_autorefresh_enabled =
+			sde_encoder_phys_cmd_is_autorefresh_enabled;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
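
The reworked ctl_start handler measures how long after the last rd_ptr irq it ran: within the 500us threshold it signals the retire fence immediately, otherwise it bumps pending_rd_ptr_cnt and lets the next rd_ptr irq do it. A simplified standalone sketch of that decision, with plain integers replacing the driver's atomics and ktime helpers:

/* Sketch of the retire-fence decision in the reworked ctl_start handler. */
#include <stdbool.h>
#include <stdio.h>

#define CTL_START_THRESHOLD_US 500

struct cmd_enc {
	int pending_rd_ptr_cnt;
	int pending_retire_fence_cnt;
};

/* returns true when the retire fence should be signalled right away */
static bool ctl_start_signal_now(struct cmd_enc *enc, long long us_since_rd_ptr)
{
	if (!enc->pending_retire_fence_cnt)
		return false;

	if (us_since_rd_ptr <= CTL_START_THRESHOLD_US) {
		enc->pending_retire_fence_cnt--;
		return true;
	}

	/* normal ordering: let the next rd_ptr irq signal the fence */
	enc->pending_rd_ptr_cnt++;
	return false;
}

int main(void)
{
	struct cmd_enc enc = { .pending_retire_fence_cnt = 1 };

	printf("ctl_start 200us after rd_ptr: %s\n",
	       ctl_start_signal_now(&enc, 200) ? "signal now" : "defer");

	enc.pending_retire_fence_cnt = 1;
	enc.pending_rd_ptr_cnt = 0;
	printf("ctl_start 5ms after rd_ptr:   %s\n",
	       ctl_start_signal_now(&enc, 5000) ? "signal now" : "defer");
	return 0;
}

Either way a single frame signals the fence exactly once, which is what the paired atomic_add_unless calls in the irq handlers above guarantee.
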
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index c95fb47..2b736e5 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -918,10 +918,12 @@
  * @pixel_format:	DRM pixel format
  * @width:		Desired fb width
  * @height:		Desired fb height
+ * @pitch:		Desired fb pitch
  */
 static int _sde_encoder_phys_wb_init_internal_fb(
 		struct sde_encoder_phys_wb *wb_enc,
-		uint32_t pixel_format, uint32_t width, uint32_t height)
+		uint32_t pixel_format, uint32_t width,
+		uint32_t height, uint32_t pitch)
 {
 	struct drm_device *dev;
 	struct drm_framebuffer *fb;
@@ -951,9 +953,11 @@
 	mode_cmd.pixel_format = pixel_format;
 	mode_cmd.width = width;
 	mode_cmd.height = height;
+	mode_cmd.pitches[0] = pitch;
 
 	size = sde_format_get_framebuffer_size(pixel_format,
-			mode_cmd.width, mode_cmd.height, 0, 0);
+			mode_cmd.width, mode_cmd.height,
+			mode_cmd.pitches, NULL, 0);
 	if (!size) {
 		SDE_DEBUG("not creating zero size buffer\n");
 		return -EINVAL;
@@ -1314,7 +1318,7 @@
 
 	/* create internal buffer for disable logic */
 	if (_sde_encoder_phys_wb_init_internal_fb(wb_enc,
-				DRM_FORMAT_RGB888, 2, 1)) {
+				DRM_FORMAT_RGB888, 2, 1, 6)) {
 		SDE_ERROR("failed to init internal fb\n");
 		goto fail_wb_init;
 	}
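
The writeback change above passes an explicit pitch for the 2x1 RGB888 disable buffer. For a packed linear format the pitch is simply width times bytes per pixel, which is where the value 6 comes from; a trivial standalone check:

/* Quick check of the pitch now passed for the 2x1 RGB888 disable buffer. */
#include <stdint.h>
#include <stdio.h>

static uint32_t linear_pitch(uint32_t width, uint32_t bytes_per_pixel)
{
	return width * bytes_per_pixel;
}

int main(void)
{
	/* RGB888 is 3 bytes per pixel, so a 2-pixel-wide fb needs pitch 6 */
	printf("pitch = %u\n", (unsigned)linear_pitch(2, 3));
	return 0;
}
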
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 04c9e79..3acf4c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -701,7 +701,8 @@
 		const struct sde_format *fmt,
 		const uint32_t width,
 		const uint32_t height,
-		struct sde_hw_fmt_layout *layout)
+		struct sde_hw_fmt_layout *layout,
+		const uint32_t *pitches)
 {
 	int i;
 
@@ -751,6 +752,17 @@
 		}
 	}
 
+	/*
+	 * Linear formats: allow user-allocated pitches when they exceed the
+	 * computed requirement.
+	 * UBWC formats: pitch values are computed uniformly across all
+	 * planes based on the UBWC specification.
+	 */
+	for (i = 0; i < layout->num_planes && i < SDE_MAX_PLANES; ++i) {
+		if (pitches && layout->plane_pitch[i] < pitches[i])
+			layout->plane_pitch[i] = pitches[i];
+	}
+
 	for (i = 0; i < SDE_MAX_PLANES; i++)
 		layout->total_size += layout->plane_size[i];
 
@@ -761,7 +773,8 @@
 		const struct sde_format *fmt,
 		const uint32_t w,
 		const uint32_t h,
-		struct sde_hw_fmt_layout *layout)
+		struct sde_hw_fmt_layout *layout,
+		const uint32_t *pitches)
 {
 	if (!layout || !fmt) {
 		DRM_ERROR("invalid pointer\n");
@@ -776,7 +789,7 @@
 	if (SDE_FORMAT_IS_UBWC(fmt) || SDE_FORMAT_IS_TILE(fmt))
 		return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout);
 
-	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
+	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
 }
 
 int sde_format_get_block_size(const struct sde_format *fmt,
@@ -801,6 +814,7 @@
 		const uint32_t format,
 		const uint32_t width,
 		const uint32_t height,
+		const uint32_t *pitches,
 		const uint64_t *modifiers,
 		const uint32_t modifiers_len)
 {
@@ -811,7 +825,10 @@
 	if (!fmt)
 		return 0;
 
-	if (sde_format_get_plane_sizes(fmt, width, height, &layout))
+	if (!pitches)
+		return -EINVAL;
+
+	if (sde_format_get_plane_sizes(fmt, width, height, &layout, pitches))
 		layout.total_size = 0;
 
 	return layout.total_size;
@@ -917,7 +934,7 @@
 
 	/* Can now check the pitches given vs pitches expected */
 	for (i = 0; i < layout->num_planes; ++i) {
-		if (layout->plane_pitch[i] != fb->pitches[i]) {
+		if (layout->plane_pitch[i] > fb->pitches[i]) {
 			DRM_ERROR("plane %u expected pitch %u, fb %u\n",
 				i, layout->plane_pitch[i], fb->pitches[i]);
 			return -EINVAL;
@@ -959,7 +976,7 @@
 
 	/* Populate the plane sizes etc via get_format */
 	ret = sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
-			layout);
+			layout, fb->pitches);
 	if (ret)
 		return ret;
 
@@ -1063,7 +1080,7 @@
 	num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
 
 	ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
-			&layout);
+			&layout, cmd->pitches);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 2333a72..58065ab 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -64,6 +64,8 @@
  * @w:               width of the buffer
  * @h:               height of the buffer
  * @layout:          layout of the buffer
+ * @pitches:         array of size [SDE_MAX_PLANES] containing the
+ *		     requested pitch for each plane, or NULL
  *
  * Return: size of the buffer
  */
@@ -71,7 +73,8 @@
 		const struct sde_format *fmt,
 		const uint32_t w,
 		const uint32_t h,
-		struct sde_hw_fmt_layout *layout);
+		struct sde_hw_fmt_layout *layout,
+		const uint32_t *pitches);
 
 /**
  * sde_format_get_block_size - get block size of given format when
@@ -137,6 +140,8 @@
  * @format:            DRM pixel format
  * @width:             pixel width
  * @height:            pixel height
+ * @pitches:           array of size [SDE_MAX_PLANES] containing the
+ *		       requested pitch for each plane (must not be NULL)
  * @modifiers:         array to populate with drm modifiers, can be NULL
  * @modifiers_len:     length of modifers array
  *
@@ -146,6 +151,7 @@
 		const uint32_t format,
 		const uint32_t width,
 		const uint32_t height,
+		const uint32_t *pitches,
 		const uint64_t *modifiers,
 		const uint32_t modifiers_len);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index e88f40f..e844bc0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -231,29 +231,6 @@
 	return 0;
 }
 
-static int sde_hw_pp_connect_external_te(struct sde_hw_pingpong *pp,
-		bool enable_external_te)
-{
-	struct sde_hw_blk_reg_map *c = &pp->hw;
-	u32 cfg;
-	int orig;
-
-	if (!pp)
-		return -EINVAL;
-
-	c = &pp->hw;
-	cfg = SDE_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
-	orig = (bool)(cfg & BIT(20));
-	if (enable_external_te)
-		cfg |= BIT(20);
-	else
-		cfg &= ~BIT(20);
-	SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
-	SDE_EVT32(pp->idx - PINGPONG_0, cfg);
-
-	return orig;
-}
-
 static int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp,
 		struct sde_hw_pp_vsync_info *info)
 {
@@ -280,7 +257,6 @@
 
 	ops->setup_tearcheck = sde_hw_pp_setup_te_config;
 	ops->enable_tearcheck = sde_hw_pp_enable_te;
-	ops->connect_external_te = sde_hw_pp_connect_external_te;
 	ops->get_vsync_info = sde_hw_pp_get_vsync_info;
 	ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config;
 	ops->setup_dsc = sde_hw_pp_setup_dsc;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index f0a2054..4f27ff5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -80,13 +80,6 @@
 			bool enable);
 
 	/**
-	 * read, modify, write to either set or clear listening to external TE
-	 * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
-	 */
-	int (*connect_external_te)(struct sde_hw_pingpong *pp,
-			bool enable_external_te);
-
-	/**
 	 * provides the programmed and current
 	 * line_count
 	 */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 85af820..d8cd75a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -316,6 +316,7 @@
 	u32 chroma_samp, unpack, src_format;
 	u32 secure = 0, secure_bit_mask;
 	u32 opmode = 0;
+	u32 fast_clear = 0;
 	u32 op_mode_off, unpack_pat_off, format_off;
 	u32 idx;
 
@@ -385,10 +386,12 @@
 		SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
 			SDE_FETCH_CONFIG_RESET_VALUE |
 			ctx->mdp->highest_bank_bit << 18);
-		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version))
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version)) {
+			fast_clear = fmt->alpha_enable ? BIT(31) : 0;
 			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
-					BIT(31) | (ctx->mdp->ubwc_swizzle) |
+					fast_clear | (ctx->mdp->ubwc_swizzle) |
 					(ctx->mdp->highest_bank_bit << 4));
+		}
 	}
 
 	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
@@ -768,6 +771,16 @@
 	SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, op_mode);
 }
 
+static u32 _sde_hw_sspp_get_scaler3_ver(struct sde_hw_pipe *ctx)
+{
+	u32 idx;
+
+	if (!ctx || _sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx))
+		return 0;
+
+	return SDE_REG_READ(&ctx->hw, QSEED3_HW_VERSION + idx);
+}
+
 /**
  * sde_hw_sspp_setup_rects()
  */
@@ -1167,8 +1180,10 @@
 	if (sde_hw_sspp_multirect_enabled(c->cap))
 		c->ops.setup_multirect = sde_hw_sspp_setup_multirect;
 
-	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features))
+	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features)) {
 		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler3;
+		c->ops.get_scaler_ver = _sde_hw_sspp_get_scaler3_ver;
+	}
 
 	if (test_bit(SDE_SSPP_HSIC, &features)) {
 		/* TODO: add version based assignment here as inline or macro */
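
The SSPP change above programs the UBWC fast-clear bit only when the format carries an alpha channel, instead of setting it unconditionally. A standalone sketch of how that register value is composed; the bit positions follow the hunk, but the surrounding types are simplified:

/* Sketch of the UBWC static control composition with conditional fast clear. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static uint32_t ubwc_static_ctrl(bool alpha_enable, uint32_t swizzle,
				 uint32_t highest_bank_bit)
{
	uint32_t fast_clear = alpha_enable ? BIT(31) : 0;

	return fast_clear | swizzle | (highest_bank_bit << 4);
}

int main(void)
{
	printf("alpha:    0x%08x\n", (unsigned)ubwc_static_ctrl(true, 0x6, 0x2));
	printf("no alpha: 0x%08x\n", (unsigned)ubwc_static_ctrl(false, 0x6, 0x2));
	return 0;
}
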
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 8d14715..c19eb5c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -575,6 +575,12 @@
 		void *scaler_cfg);
 
 	/**
+	 * get_scaler_ver - get scaler h/w version
+	 * @ctx: Pointer to pipe context
+	 */
+	u32 (*get_scaler_ver)(struct sde_hw_pipe *ctx);
+
+	/**
 	 * setup_sys_cache - setup system cache configuration
 	 * @ctx: Pointer to pipe context
 	 * @cfg: Pointer to system cache configuration
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 42af245..8747288 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -404,7 +404,7 @@
 
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
-	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
+	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
 }
 
 static void sde_kms_wait_for_tx_complete(struct msm_kms *kms,
@@ -641,6 +641,7 @@
 		.soft_reset   = dsi_display_soft_reset,
 		.pre_kickoff  = dsi_conn_pre_kickoff,
 		.clk_ctrl = dsi_display_clk_ctrl,
+		.set_power = dsi_display_set_power,
 		.get_topology = dsi_conn_get_topology,
 		.get_dst_format = dsi_display_get_dst_format
 	};
@@ -1135,7 +1136,7 @@
 	}
 
 	ret = sde_format_get_plane_sizes(fbo->fmt, fbo->width, fbo->height,
-			&fbo->layout);
+			&fbo->layout, fbo->layout.plane_pitch);
 	if (ret) {
 		SDE_ERROR("failed to get plane sizes\n");
 		goto done;
@@ -1341,6 +1342,70 @@
 	sde_reg_dma_deinit();
 }
 
+int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
+{
+	int i;
+
+	if (!sde_kms)
+		return -EINVAL;
+
+	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		struct msm_mmu *mmu;
+		struct msm_gem_address_space *aspace = sde_kms->aspace[i];
+
+		if (!aspace)
+			continue;
+
+		mmu = sde_kms->aspace[i]->mmu;
+
+		if (secure_only &&
+			!aspace->mmu->funcs->is_domain_secure(mmu))
+			continue;
+
+		/* cleanup aspace before detaching */
+		msm_gem_aspace_domain_attach_detach_update(aspace, true);
+
+		SDE_DEBUG("Detaching domain:%d\n", i);
+		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+
+		aspace->domain_attached = false;
+	}
+
+	return 0;
+}
+
+int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
+{
+	int i;
+
+	if (!sde_kms)
+		return -EINVAL;
+
+	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		struct msm_mmu *mmu;
+		struct msm_gem_address_space *aspace = sde_kms->aspace[i];
+
+		if (!aspace)
+			continue;
+
+		mmu = sde_kms->aspace[i]->mmu;
+
+		if (secure_only &&
+			!aspace->mmu->funcs->is_domain_secure(mmu))
+			continue;
+
+		SDE_DEBUG("Attaching domain:%d\n", i);
+		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+
+		msm_gem_aspace_domain_attach_detach_update(aspace, false);
+		aspace->domain_attached = true;
+	}
+
+	return 0;
+}
+
 static void sde_kms_destroy(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms;
@@ -1373,6 +1438,103 @@
 		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
 }
 
+static int sde_kms_check_secure_transition(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_crtc *sec_crtc = NULL, *temp_crtc = NULL;
+	struct drm_crtc_state *crtc_state;
+	int secure_crtc_cnt = 0, active_crtc_cnt = 0;
+	int secure_global_crtc_cnt = 0, active_mode_crtc_cnt = 0;
+	int i;
+
+	if (!kms || !state) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	/* iterate state object for active and secure crtc */
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		if (!crtc_state->active)
+			continue;
+		active_crtc_cnt++;
+		if (sde_crtc_get_secure_level(crtc, crtc_state) ==
+				SDE_DRM_SEC_ONLY) {
+			sec_crtc = crtc;
+			secure_crtc_cnt++;
+		}
+	}
+
+	/* bail out from further validation if there is no secure crtc */
+	if (!secure_crtc_cnt)
+		return 0;
+
+	if ((secure_crtc_cnt > MAX_ALLOWED_SECURE_CLIENT_CNT) ||
+		(secure_crtc_cnt &&
+		 (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE))) {
+		SDE_ERROR("Secure check failed active:%d, secure:%d\n",
+				active_crtc_cnt, secure_crtc_cnt);
+		return -EPERM;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	/* iterate global list for active and secure crtc */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+
+		if (!crtc->state->active)
+			continue;
+
+		active_mode_crtc_cnt++;
+
+		if (sde_crtc_get_secure_level(crtc, crtc->state) ==
+				SDE_DRM_SEC_ONLY) {
+			secure_global_crtc_cnt++;
+			temp_crtc = crtc;
+		}
+	}
+
+	/*
+	 * Flag an error if more than one crtc is active, or if the secure
+	 * crtc in the previous commit differs from the one in the current
+	 * commit.
+	 */
+	if (secure_crtc_cnt && ((active_mode_crtc_cnt > 1) ||
+			(secure_global_crtc_cnt && (temp_crtc != sec_crtc))))
+		SDE_ERROR("Secure check failed active:%d crtc_id:%d\n",
+				active_mode_crtc_cnt,
+				temp_crtc ? temp_crtc->base.id : -1);
+
+	return 0;
+}
+
+static int sde_kms_atomic_check(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !state)
+		return -EINVAL;
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+
+	ret = drm_atomic_helper_check(dev, state);
+	if (ret)
+		return ret;
+	/*
+	 * Check whether any secure transition (moving a CRTC between
+	 * secure and non-secure state, or vice-versa) is allowed. When
+	 * moving to the secure state, only planes with fb_mode set to
+	 * dir_translated can be staged on the CRTC, and only one CRTC
+	 * can be active while in the secure state.
+	 */
+	return sde_kms_check_secure_transition(kms, state);
+}
+
 static struct msm_gem_address_space*
 _sde_kms_get_address_space(struct msm_kms *kms,
 		unsigned int domain)
@@ -1393,7 +1555,9 @@
 	if (domain >= MSM_SMMU_DOMAIN_MAX)
 		return NULL;
 
-	return sde_kms->aspace[domain];
+	return (sde_kms->aspace[domain] &&
+			sde_kms->aspace[domain]->domain_attached) ?
+		sde_kms->aspace[domain] : NULL;
 }
 
 static const struct msm_kms_funcs kms_funcs = {
@@ -1413,6 +1577,7 @@
 	.enable_vblank   = sde_kms_enable_vblank,
 	.disable_vblank  = sde_kms_disable_vblank,
 	.check_modified_format = sde_format_check_modified_format,
+	.atomic_check = sde_kms_atomic_check,
 	.get_format      = sde_get_msm_format,
 	.round_pixclk    = sde_kms_round_pixclk,
 	.destroy         = sde_kms_destroy,
@@ -1463,7 +1628,7 @@
 			continue;
 		}
 
-		aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
 			mmu, "sde");
 		if (IS_ERR(aspace)) {
 			ret = PTR_ERR(aspace);
@@ -1480,7 +1645,7 @@
 			msm_gem_address_space_destroy(aspace);
 			goto fail;
 		}
-
+		aspace->domain_attached = true;
 	}
 
 	return 0;
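
sde_kms_check_secure_transition() above counts active and secure crtcs in the incoming atomic state and rejects commits that exceed the single-secure-client and single-active-crtc limits. A simplified standalone sketch of that counting logic, using plain arrays instead of drm state iteration; the limits mirror the new defines in sde_kms.h:

/* Sketch of the commit-level secure transition count-and-check. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SECURE_CLIENTS       1
#define MAX_CRTCS_DURING_SECURE  1

static int check_secure_transition(const bool *active, const bool *secure, int n)
{
	int active_cnt = 0, secure_cnt = 0, i;

	for (i = 0; i < n; i++) {
		if (!active[i])
			continue;
		active_cnt++;
		if (secure[i])
			secure_cnt++;
	}

	if (!secure_cnt)
		return 0;	/* nothing secure, nothing to validate */

	if (secure_cnt > MAX_SECURE_CLIENTS ||
	    active_cnt > MAX_CRTCS_DURING_SECURE)
		return -1;	/* -EPERM in the driver */

	return 0;
}

int main(void)
{
	bool active1[] = { true },       secure1[] = { true };
	bool active2[] = { true, true }, secure2[] = { true, false };

	printf("single secure crtc:         %d\n",
	       check_secure_transition(active1, secure1, 1));
	printf("secure + extra active crtc: %d\n",
	       check_secure_transition(active2, secure2, 2));
	return 0;
}
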
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index d818fdf..4c0699e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -93,6 +93,15 @@
 /* timeout in frames waiting for frame done */
 #define SDE_FRAME_DONE_TIMEOUT	60
 
+/* max active secure client counts allowed */
+#define MAX_ALLOWED_SECURE_CLIENT_CNT	1
+
+/* max active crtc when secure client is active */
+#define MAX_ALLOWED_CRTC_CNT_DURING_SECURE	1
+
+/* max virtual encoders per secure crtc */
+#define MAX_ALLOWED_ENCODER_CNT_PER_SECURE_CRTC	1
+
 /*
  * struct sde_irq_callback - IRQ callback handlers
  * @list: list to callback
@@ -500,4 +509,13 @@
  */
 void sde_kms_fbo_unreference(struct sde_kms_fbo *fbo);
 
+/**
+ * smmu attach/detach functions
+ * @sde_kms: pointer to sde_kms structure
+ * @secure_only: if true, only secure contexts are attached/detached;
+ * otherwise all contexts are attached/detached.
+ */
+int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only);
+int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only);
+
 #endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 665315d..1affa9c 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -258,16 +258,16 @@
 				((src_width + 32) * fmt->bpp);
 		} else {
 			/* non NV12 */
-			total_fl = (fixed_buff_size / 2 - hflip_bytes) /
-				((src_width + 32) * fmt->bpp * 2);
+			total_fl = (fixed_buff_size / 2 - hflip_bytes) * 2 /
+				((src_width + 32) * fmt->bpp);
 		}
 	} else {
 		if (pstate->multirect_mode == SDE_SSPP_MULTIRECT_PARALLEL) {
-			total_fl = (fixed_buff_size / 2 - hflip_bytes) /
-				((src_width + 32) * fmt->bpp * 2);
+			total_fl = (fixed_buff_size / 2 - hflip_bytes) * 2 /
+				((src_width + 32) * fmt->bpp);
 		} else {
-			total_fl = (fixed_buff_size - hflip_bytes) /
-				((src_width + 32) * fmt->bpp * 2);
+			total_fl = (fixed_buff_size - hflip_bytes) * 2 /
+				((src_width + 32) * fmt->bpp);
 		}
 	}
 
@@ -675,7 +675,7 @@
 
 	SDE_DEBUG("plane%d size:%llu time:%llu\n",
 			plane->base.id, cfg.size, cfg.time);
-	SDE_EVT32(DRMID(plane), cfg.size, cfg.time);
+	SDE_EVT32_VERBOSE(DRMID(plane), cfg.size, cfg.time);
 	psde->pipe_hw->ops.setup_ts_prefill(psde->pipe_hw, &cfg,
 			pstate->multirect_index);
 }
@@ -925,7 +925,7 @@
 	else if (ret)
 		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
 	else if (psde->pipe_hw->ops.setup_sourceaddress) {
-		SDE_EVT32(psde->pipe_hw->idx,
+		SDE_EVT32_VERBOSE(psde->pipe_hw->idx,
 				pipe_cfg->layout.width,
 				pipe_cfg->layout.height,
 				pipe_cfg->layout.plane_addr[0],
@@ -957,15 +957,15 @@
 
 	cfg->dir_lut = msm_property_get_blob(
 			&psde->property_info,
-			pstate->property_blobs, &cfg->dir_len,
+			&pstate->property_state, &cfg->dir_len,
 			PLANE_PROP_SCALER_LUT_ED);
 	cfg->cir_lut = msm_property_get_blob(
 			&psde->property_info,
-			pstate->property_blobs, &cfg->cir_len,
+			&pstate->property_state, &cfg->cir_len,
 			PLANE_PROP_SCALER_LUT_CIR);
 	cfg->sep_lut = msm_property_get_blob(
 			&psde->property_info,
-			pstate->property_blobs, &cfg->sep_len,
+			&pstate->property_state, &cfg->sep_len,
 			PLANE_PROP_SCALER_LUT_SEP);
 	if (!cfg->dir_lut || !cfg->cir_lut || !cfg->sep_lut)
 		ret = -ENODATA;
@@ -1276,7 +1276,7 @@
 	if (psde->pipe_hw->ops.setup_pa_memcolor) {
 		/* Skin memory color setup */
 		memcol = msm_property_get_blob(&psde->property_info,
-					pstate->property_blobs,
+					&pstate->property_state,
 					&memcol_sz,
 					PLANE_PROP_SKIN_COLOR);
 		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -1284,7 +1284,7 @@
 
 		/* Sky memory color setup */
 		memcol = msm_property_get_blob(&psde->property_info,
-					pstate->property_blobs,
+					&pstate->property_state,
 					&memcol_sz,
 					PLANE_PROP_SKY_COLOR);
 		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -1292,7 +1292,7 @@
 
 		/* Foliage memory color setup */
 		memcol = msm_property_get_blob(&psde->property_info,
-					pstate->property_blobs,
+					&pstate->property_state,
 					&memcol_sz,
 					PLANE_PROP_FOLIAGE_COLOR);
 		psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
@@ -3250,7 +3250,8 @@
 	}
 
 	/* determine what needs to be refreshed */
-	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
+	while ((idx = msm_property_pop_dirty(&psde->property_info,
+					&pstate->property_state)) >= 0) {
 		switch (idx) {
 		case PLANE_PROP_SCALER_V1:
 		case PLANE_PROP_SCALER_V2:
@@ -3805,6 +3806,10 @@
 		sde_kms_info_stop(info);
 	}
 
+	if (psde->pipe_hw && psde->pipe_hw->ops.get_scaler_ver)
+		sde_kms_info_add_keyint(info, "scaler_step_ver",
+			psde->pipe_hw->ops.get_scaler_ver(psde->pipe_hw));
+
 	sde_kms_info_add_keyint(info, "max_linewidth",
 			psde->pipe_sblk->maxlinewidth);
 	sde_kms_info_add_keyint(info, "max_upscale",
@@ -3908,7 +3913,8 @@
 	}
 
 	/* force property to be dirty, even if the pointer didn't change */
-	msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V1);
+	msm_property_set_dirty(&psde->property_info,
+			&pstate->property_state, PLANE_PROP_SCALER_V1);
 
 	/* populate from user space */
 	pe = &pstate->pixel_ext;
@@ -3974,7 +3980,8 @@
 	}
 
 	/* force property to be dirty, even if the pointer didn't change */
-	msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V2);
+	msm_property_set_dirty(&psde->property_info,
+			&pstate->property_state, PLANE_PROP_SCALER_V2);
 
 	/* populate from user space */
 	pe = &pstate->pixel_ext;
@@ -4091,8 +4098,7 @@
 	} else {
 		pstate = to_sde_plane_state(state);
 		ret = msm_property_atomic_set(&psde->property_info,
-				pstate->property_values, pstate->property_blobs,
-				property, val);
+				&pstate->property_state, property, val);
 		if (!ret) {
 			idx = msm_property_index(&psde->property_info,
 					property);
@@ -4109,7 +4115,7 @@
 				break;
 			case PLANE_PROP_SCALER_V2:
 				_sde_plane_set_scaler_v2(psde, pstate,
-					(void *)val);
+						(void *)val);
 				break;
 			case PLANE_PROP_EXCL_RECT_V1:
 				_sde_plane_set_excl_rect_v1(psde, pstate,
@@ -4154,8 +4160,7 @@
 		pstate = to_sde_plane_state(state);
 		sde_plane_rot_install_caps(plane);
 		ret = msm_property_atomic_get(&psde->property_info,
-				pstate->property_values, pstate->property_blobs,
-				property, val);
+				&pstate->property_state, property, val);
 	}
 
 	return ret;
@@ -4216,7 +4221,7 @@
 
 	/* destroy value helper */
 	msm_property_destroy_state(&psde->property_info, pstate,
-			pstate->property_values, pstate->property_blobs);
+			&pstate->property_state);
 }
 
 static struct drm_plane_state *
@@ -4247,13 +4252,14 @@
 
 	/* duplicate value helper */
 	msm_property_duplicate_state(&psde->property_info, old_state, pstate,
-			pstate->property_values, pstate->property_blobs);
+			&pstate->property_state, pstate->property_values);
 
 	/* clear out any input fence */
 	pstate->input_fence = 0;
 	input_fence_default = msm_property_get_default(
 			&psde->property_info, PLANE_PROP_INPUT_FENCE);
-	msm_property_set_property(&psde->property_info, pstate->property_values,
+	msm_property_set_property(&psde->property_info,
+			&pstate->property_state,
 			PLANE_PROP_INPUT_FENCE, input_fence_default);
 
 	pstate->dirty = 0x0;
@@ -4293,7 +4299,8 @@
 
 	/* reset value helper */
 	msm_property_reset_state(&psde->property_info, pstate,
-			pstate->property_values, pstate->property_blobs);
+			&pstate->property_state,
+			pstate->property_values);
 
 	pstate->base.plane = plane;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 2f0068a..a5599a5 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -119,8 +119,8 @@
 /**
  * struct sde_plane_state: Define sde extension of drm plane state object
  * @base:	base drm plane state object
+ * @property_state: Local storage for msm_prop properties
  * @property_values:	cached plane property values
- * @property_blobs:	blob properties
  * @aspace:	pointer to address space for input/output buffers
  * @input_fence:	dereferenced input fence pointer
  * @stage:	assigned by crtc blender
@@ -136,8 +136,8 @@
  */
 struct sde_plane_state {
 	struct drm_plane_state base;
-	uint64_t property_values[PLANE_PROP_COUNT];
-	struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+	struct msm_property_state property_state;
+	struct msm_property_value property_values[PLANE_PROP_COUNT];
 	struct msm_gem_address_space *aspace;
 	void *input_fence;
 	enum sde_stage stage;
@@ -178,8 +178,8 @@
  * @X: Property index, from enum msm_mdp_plane_property
  * Returns: Integer value of requested property
  */
-#define sde_plane_get_property(S, X) \
-	((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+#define sde_plane_get_property(S, X) ((S) && ((X) < PLANE_PROP_COUNT) ? \
+	((S)->property_values[(X)].value) : 0)
 
 /**
  * sde_plane_pipe - return sspp identifier for the given plane
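The msm_property_* signature changes above (pop_dirty, set_dirty, atomic_get/set now taking &pstate->property_state) all serve one idea: cached values and dirty tracking move out of the device-wide property_info and into each atomic state. The following is a generic, purely illustrative sketch of that per-state bookkeeping; the names are not the real msm_prop API.

#include <stdint.h>

#define PROP_COUNT 64

struct prop_state {
	uint64_t value[PROP_COUNT];	/* cached scalar per property id */
	uint64_t dirty_mask;		/* lives with the state, not the device */
};

static void prop_set(struct prop_state *s, unsigned int idx, uint64_t val)
{
	s->value[idx] = val;
	s->dirty_mask |= 1ULL << idx;	/* mark dirty in this state only */
}

/* Returns the next dirty property index, or -1 when nothing is left. */
static int prop_pop_dirty(struct prop_state *s)
{
	unsigned int idx;

	if (!s->dirty_mask)
		return -1;
	idx = __builtin_ctzll(s->dirty_mask);
	s->dirty_mask &= ~(1ULL << idx);
	return (int)idx;
}

Keeping the mask inside the state means two atomic states prepared in parallel do not contend for one shared dirty list.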
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 0382ed0..be3a8af 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1150,7 +1150,7 @@
 
 		(void) msm_property_set_property(
 				sde_connector_get_propinfo(conn),
-				sde_connector_get_property_values(conn->state),
+				sde_connector_get_property_state(conn->state),
 				CONNECTOR_PROP_TOPOLOGY_NAME,
 				SDE_RM_TOPOLOGY_NONE);
 	}
@@ -1170,7 +1170,7 @@
 
 	ret = msm_property_set_property(
 			sde_connector_get_propinfo(conn_state->connector),
-			sde_connector_get_property_values(conn_state),
+			sde_connector_get_property_state(conn_state),
 			CONNECTOR_PROP_TOPOLOGY_NAME,
 			rsvp->topology);
 	if (ret) {
@@ -1267,7 +1267,7 @@
 		(void) msm_property_set_property(
 				sde_connector_get_propinfo(
 						conn_state->connector),
-				sde_connector_get_property_values(conn_state),
+				sde_connector_get_property_state(conn_state),
 				CONNECTOR_PROP_TOPOLOGY_NAME,
 				SDE_RM_TOPOLOGY_NONE);
 	}
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 58448ca..58069f2 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -56,6 +56,8 @@
 #define MMSS_VBIF_TEST_BUS_OUT		0x230
 
 /* Vbif error info */
+#define MMSS_VBIF_PND_ERR		0x190
+#define MMSS_VBIF_SRC_ERR		0x194
 #define MMSS_VBIF_XIN_HALT_CTRL1	0x204
 #define MMSS_VBIF_ERR_INFO		0X1a0
 #define MMSS_VBIF_ERR_INFO_1		0x1a4
@@ -2373,7 +2375,7 @@
 	u32 **dump_mem = NULL;
 	u32 *dump_addr = NULL;
 	u32 value, d0, d1;
-	unsigned long reg;
+	unsigned long reg, reg1, reg2;
 	struct vbif_debug_bus_entry *head;
 	phys_addr_t phys = 0;
 	int i, list_size = 0;
@@ -2447,13 +2449,18 @@
 	wmb();
 
 	/**
-	 * Extract VBIF error info based on XIN halt status.
-	 * If the XIN client is not in HALT state, then retrieve the
-	 * VBIF error info for it.
+	 * Extract VBIF error info based on XIN halt and error status.
+	 * If the XIN client is not in HALT state, or an error is detected,
+	 * then retrieve the VBIF error info for it.
 	 */
 	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
-	dev_err(sde_dbg_base.dev, "XIN HALT:0x%lX\n", reg);
+	reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+	reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+	dev_err(sde_dbg_base.dev,
+			"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+			reg, reg1, reg2);
 	reg >>= 16;
+	reg &= ~(reg1 | reg2);
 	for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
 		if (!test_bit(0, &reg)) {
 			writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
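A note on the VBIF dump change above: after shifting the halt status down, the code clears the bit for every XIN that reports a pending or source error, so the dump loop (which skips halted clients) still reads error info for them. A small self-contained sketch of that masking, with made-up register values:

/* Hedged sketch of the XIN selection logic above; values are invented. */
#include <stdio.h>

#define NUM_XIN_CLIENTS 16

int main(void)
{
	unsigned long halt = 0xffff0000;	/* upper half: per-XIN halt status */
	unsigned long pnd  = 0x0005;		/* XINs 0 and 2 report pending errors */
	unsigned long src  = 0x0010;		/* XIN 4 reports a source error */
	unsigned long reg;
	int i;

	reg = halt >> 16;		/* bring the halt bits down to [15:0] */
	reg &= ~(pnd | src);		/* force a dump for erroring XINs */

	for (i = 0; i < NUM_XIN_CLIENTS; i++, reg >>= 1)
		if (!(reg & 1))
			printf("would read VBIF err info for xin %d\n", i);
	return 0;
}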
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 130bd1f..3c03b92 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -228,10 +228,17 @@
 {
 	u8 cea_mode = 0;
 	struct drm_display_mode *mode;
+	u32 mode_fmt_flags = 0;
 
 	/* Need to add Y420 support flag to the modes */
 	list_for_each_entry(mode, &connector->probed_modes, head) {
+		/* Cache the format flags before clearing */
+		mode_fmt_flags = mode->flags;
+		/* Clear the RGB/YUV format flags before calling upstream API */
+		mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
 		cea_mode = drm_match_cea_mode(mode);
+		/* Restore the format flags */
+		mode->flags = mode_fmt_flags;
 		if ((cea_mode != 0) && (cea_mode == video_format)) {
 			SDE_EDID_DEBUG("%s found match for %d ", __func__,
 			video_format);
@@ -245,7 +252,7 @@
 const u8 *db)
 {
 	u32 offset = 0;
-	u8 len = 0;
+	u8 cmdb_len = 0;
 	u8 svd_len = 0;
 	const u8 *svd = NULL;
 	u32 i = 0, j = 0;
@@ -261,10 +268,8 @@
 		return;
 	}
 	SDE_EDID_DEBUG("%s +\n", __func__);
-	len = db[0] & 0x1f;
+	cmdb_len = db[0] & 0x1f;
 
-	if (len < 7)
-		return;
 	/* Byte 3 to L+1 contain SVDs */
 	offset += 2;
 
@@ -272,20 +277,24 @@
 
 	if (svd) {
 		/*moving to the next byte as vic info begins there*/
-		++svd;
 		svd_len = svd[0] & 0x1f;
+		++svd;
 	}
 
 	for (i = 0; i < svd_len; i++, j++) {
-		video_format = *svd & 0x7F;
-		if (db[offset] & (1 << j))
+		video_format = *(svd + i) & 0x7F;
+		if (cmdb_len == 1) {
+			/* If cmdb_len is 1, it means all SVDs support YUV */
+			sde_edid_set_y420_support(connector, video_format);
+		} else if (db[offset] & (1 << j)) {
 			sde_edid_set_y420_support(connector, video_format);
 
-		if (j & 0x80) {
-			j = j/8;
-			offset++;
-			if (offset >= len)
-				break;
+			if (j & 0x80) {
+				j = j/8;
+				offset++;
+				if (offset >= cmdb_len)
+					break;
+			}
 		}
 	}
 
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index eb68439..b58b322 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -33,6 +33,8 @@
 #define SDE_CEA_EXT    0x02
 #define SDE_EXTENDED_TAG 0x07
 
+#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
+
 enum extended_data_block_types {
 	VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
 	VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
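For the Y420 capability-map rework in sde_edid_parser.c above: as the comment in that hunk notes, a CMDB whose length byte is 1 carries no bitmap and means every short video descriptor supports YCbCr 4:2:0; otherwise one bit per SVD applies. A standalone sketch of that decision (helper names and the simplified map layout are illustrative, not the driver code):

#include <stdbool.h>
#include <stdio.h>

static bool svd_supports_y420(const unsigned char *cmdb, unsigned int cmdb_len,
			      unsigned int svd_index)
{
	/* Length 1 means no bitmap follows: all SVDs support Y420. */
	if (cmdb_len == 1)
		return true;

	/* Otherwise bit N of the map corresponds to the Nth SVD. */
	return cmdb[svd_index / 8] & (1 << (svd_index % 8));
}

int main(void)
{
	const unsigned char cmdb[] = { 0x05, 0x00 };	/* SVDs 0 and 2 only */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("svd %u: %s\n", i,
		       svd_supports_y420(cmdb, sizeof(cmdb), i) ? "y420" : "-");
	return 0;
}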
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 78c325d..9cbffa5 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,7 +16,7 @@
 
 #define MAX_CLIENT_NAME_LEN 128
 
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	1600000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	0
 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
 #define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	1600000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 54bdd42..4fc40d9 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -582,7 +582,7 @@
 		return -EINVAL;
 
 	mutex_lock(&rsc->client_lock);
-	SDE_EVT32(caller_client->id, caller_client->current_state,
+	SDE_EVT32_VERBOSE(caller_client->id, caller_client->current_state,
 			state, rsc->current_state, SDE_EVTLOG_FUNC_ENTRY);
 	caller_client->crtc_id = crtc_id;
 	caller_client->current_state = state;
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 26a3154..aa8fa01 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -345,7 +345,7 @@
 		if (!test_bit(POWER_CTRL_BIT_12, &power_status)) {
 			reg = dss_reg_r(&rsc->drv_io,
 				SDE_RSCC_SEQ_PROGRAM_COUNTER, rsc->debug_mode);
-			SDE_EVT32(count, reg, power_status);
+			SDE_EVT32_VERBOSE(count, reg, power_status);
 			rc = 0;
 			break;
 		}
@@ -676,7 +676,7 @@
 		break;
 
 	case VSYNC_ENABLE:
-		reg = BIT(8) | ((mode & 0x7) < 10);
+		reg = BIT(8) | ((mode & 0x7) << 10);
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
 					reg, rsc->debug_mode);
 		break;
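The one-character fix in sde_rsc_hw.c above is worth spelling out: `(mode & 0x7) < 10` is a comparison that evaluates to 0 or 1, while `(mode & 0x7) << 10` actually places the mode field at bits [12:10] of the debug-bus select value. A tiny standalone illustration (the field position follows from the fixed expression, nothing else is assumed):

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned int mode = 0x5;

	/* Old, buggy expression: a comparison result, not a bit field. */
	unsigned int bad  = BIT(8) | ((mode & 0x7) < 10);	/* 0x101 */
	/* Fixed expression: mode packed into bits [12:10]. */
	unsigned int good = BIT(8) | ((mode & 0x7) << 10);	/* 0x1500 */

	printf("bad=0x%x good=0x%x\n", bad, good);
	return 0;
}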
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 56bb758..7bb1e53 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -30,6 +30,7 @@
 #include "radeon_audio.h"
 #include "atom.h"
 #include <linux/backlight.h>
+#include <linux/dmi.h>
 
 extern int atom_debug;
 
@@ -2183,9 +2184,17 @@
 		goto assigned;
 	}
 
-	/* on DCE32 and encoder can driver any block so just crtc id */
+	/*
+	 * On DCE32 any encoder can drive any block so usually just use crtc id,
+	 * but Apple thinks different at least on iMac10,1, so there use linkb,
+	 * otherwise the internal eDP panel will stay dark.
+	 */
 	if (ASIC_IS_DCE32(rdev)) {
-		enc_idx = radeon_crtc->crtc_id;
+		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+			enc_idx = (dig->linkb) ? 1 : 0;
+		else
+			enc_idx = radeon_crtc->crtc_id;
+
 		goto assigned;
 	}
 
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index ea36dc4..2481049 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -782,6 +782,12 @@
 	if (r600_dpm_get_vrefresh(rdev) > 120)
 		return true;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (r600_dpm_get_vrefresh(rdev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c18fc31..94983e8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1343,7 +1343,6 @@
 		       mem_type);
 		return ret;
 	}
-	fence_put(man->move);
 
 	man->use_type = false;
 	man->has_type = false;
@@ -1355,6 +1354,9 @@
 		ret = (*man->func->takedown)(man);
 	}
 
+	fence_put(man->move);
+	man->move = NULL;
+
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_clean_mm);
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 6f465aa..6426363 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2342,6 +2342,11 @@
 		return ret;
 	}
 
+	/* Clear the busy_data stats - we're starting over from scratch */
+	adreno_dev->busy_data.gpu_busy = 0;
+	adreno_dev->busy_data.vbif_ram_cycles = 0;
+	adreno_dev->busy_data.vbif_starved_ram = 0;
+
 	/* Set the page table back to the default page table */
 	adreno_ringbuffer_set_global(adreno_dev, 0);
 	kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 422c434..0a45d27 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -983,6 +983,13 @@
 	spin_unlock(&dispatcher->plist_lock);
 }
 
+static inline void _decrement_submit_now(struct kgsl_device *device)
+{
+	spin_lock(&device->submit_lock);
+	device->submit_now--;
+	spin_unlock(&device->submit_lock);
+}
+
 /**
  * adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
  * @adreno_dev: Pointer to the adreno device struct
@@ -992,15 +999,29 @@
 static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
 {
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+	spin_lock(&device->submit_lock);
+	/* If the GPU is headed to SLUMBER, schedule the work for later */
+	if (device->slumber) {
+		spin_unlock(&device->submit_lock);
+		goto done;
+	}
+	device->submit_now++;
+	spin_unlock(&device->submit_lock);
 
 	/* If the dispatcher is busy then schedule the work for later */
 	if (!mutex_trylock(&dispatcher->mutex)) {
-		adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
-		return;
+		_decrement_submit_now(device);
+		goto done;
 	}
 
 	_adreno_dispatcher_issuecmds(adreno_dev);
 	mutex_unlock(&dispatcher->mutex);
+	_decrement_submit_now(device);
+	return;
+done:
+	adreno_dispatcher_schedule(device);
 }
 
 /**
@@ -2452,7 +2473,7 @@
 	mutex_unlock(&device->mutex);
 }
 
-static void adreno_dispatcher_work(struct work_struct *work)
+static void adreno_dispatcher_work(struct kthread_work *work)
 {
 	struct adreno_dispatcher *dispatcher =
 		container_of(work, struct adreno_dispatcher, work);
@@ -2512,7 +2533,7 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
 
-	kgsl_schedule_work(&dispatcher->work);
+	kthread_queue_work(&kgsl_driver.worker, &dispatcher->work);
 }
 
 /**
@@ -2808,7 +2829,7 @@
 	setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
 		(unsigned long) adreno_dev);
 
-	INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+	kthread_init_work(&dispatcher->work, adreno_dispatcher_work);
 
 	init_completion(&dispatcher->idle_gate);
 	complete_all(&dispatcher->idle_gate);
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 72545db..48f0cdc 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -91,7 +91,7 @@
 	atomic_t fault;
 	struct plist_head pending;
 	spinlock_t plist_lock;
-	struct work_struct work;
+	struct kthread_work work;
 	struct kobject kobj;
 	struct completion idle_gate;
 	unsigned int disp_preempt_fair_sched;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index c6df7bb..0882447 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -548,6 +548,8 @@
 
 	mutex_unlock(&device->mutex);
 
+	debugfs_remove_recursive(drawctxt->debug_root);
+
 	/* wake threads waiting to submit commands from this context */
 	wake_up_all(&drawctxt->waiting);
 	wake_up_all(&drawctxt->wq);
@@ -569,7 +571,6 @@
 		gpudev->preemption_context_destroy(context);
 
 	drawctxt = ADRENO_CONTEXT(context);
-	debugfs_remove_recursive(drawctxt->debug_root);
 	kfree(drawctxt);
 }
 
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 129e99c..f88132f 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4645,6 +4645,7 @@
 		device->id, device->reg_phys, device->reg_len);
 
 	rwlock_init(&device->context_lock);
+	spin_lock_init(&device->submit_lock);
 
 	setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
 
@@ -4788,6 +4789,8 @@
 static int __init kgsl_core_init(void)
 {
 	int result = 0;
+	struct sched_param param = { .sched_priority = 2 };
+
 	/* alloc major and minor device numbers */
 	result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
 		"kgsl");
@@ -4854,6 +4857,18 @@
 	kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
 		WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 
+	kthread_init_worker(&kgsl_driver.worker);
+
+	kgsl_driver.worker_thread = kthread_run(kthread_worker_fn,
+		&kgsl_driver.worker, "kgsl_worker_thread");
+
+	if (IS_ERR(kgsl_driver.worker_thread)) {
+		pr_err("unable to start kgsl thread\n");
+		goto err;
+	}
+
+	sched_setscheduler(kgsl_driver.worker_thread, SCHED_FIFO, &param);
+
 	kgsl_events_init();
 
 	result = kgsl_drawobjs_cache_init();
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index c54e51e..f80da79 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -25,6 +25,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/kthread.h>
 #include <asm/cacheflush.h>
 
 /*
@@ -151,6 +152,8 @@
 	unsigned int full_cache_threshold;
 	struct workqueue_struct *workqueue;
 	struct workqueue_struct *mem_workqueue;
+	struct kthread_worker worker;
+	struct task_struct *worker_thread;
 };
 
 extern struct kgsl_driver kgsl_driver;
@@ -300,7 +303,7 @@
 	void *priv;
 	struct list_head node;
 	unsigned int created;
-	struct work_struct work;
+	struct kthread_work work;
 	int result;
 	struct kgsl_event_group *group;
 };
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index b621ada..4aaea80 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -269,6 +269,11 @@
 	struct kgsl_pwrctrl pwrctrl;
 	int open_count;
 
+	/* For GPU inline submission */
+	uint32_t submit_now;
+	spinlock_t submit_lock;
+	bool slumber;
+
 	struct mutex mutex;
 	uint32_t state;
 	uint32_t requested_state;
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index d042f05..759a966 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -32,7 +32,7 @@
 {
 	list_del(&event->node);
 	event->result = result;
-	queue_work(device->events_wq, &event->work);
+	kthread_queue_work(&kgsl_driver.worker, &event->work);
 }
 
 /**
@@ -42,7 +42,7 @@
  * Each event callback has its own work struct and is run on a event specific
  * workqeuue.  This is the worker that queues up the event callback function.
  */
-static void _kgsl_event_worker(struct work_struct *work)
+static void _kgsl_event_worker(struct kthread_work *work)
 {
 	struct kgsl_event *event = container_of(work, struct kgsl_event, work);
 	int id = KGSL_CONTEXT_ID(event->context);
@@ -286,7 +286,7 @@
 	event->created = jiffies;
 	event->group = group;
 
-	INIT_WORK(&event->work, _kgsl_event_worker);
+	kthread_init_work(&event->work, _kgsl_event_worker);
 
 	trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);
 
@@ -301,7 +301,7 @@
 
 	if (timestamp_cmp(retired, timestamp) >= 0) {
 		event->result = KGSL_EVENT_RETIRED;
-		queue_work(device->events_wq, &event->work);
+		kthread_queue_work(&kgsl_driver.worker, &event->work);
 		spin_unlock(&group->lock);
 		return 0;
 	}
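The KGSL changes above move event and dispatcher processing from a workqueue onto a dedicated kthread_worker so the callbacks can run on a real-time thread. A minimal sketch of that kernel pattern, independent of KGSL (module and symbol names are illustrative; priority value mirrors the one used in the hunk):

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct kthread_worker demo_worker;
static struct task_struct *demo_thread;
static struct kthread_work demo_work;

static void demo_work_fn(struct kthread_work *work)
{
	pr_info("ran on the dedicated worker thread\n");
}

static int __init demo_init(void)
{
	struct sched_param param = { .sched_priority = 2 };

	kthread_init_worker(&demo_worker);
	demo_thread = kthread_run(kthread_worker_fn, &demo_worker,
				  "demo_worker");
	if (IS_ERR(demo_thread))
		return PTR_ERR(demo_thread);

	sched_setscheduler(demo_thread, SCHED_FIFO, &param);

	kthread_init_work(&demo_work, demo_work_fn);
	kthread_queue_work(&demo_worker, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_flush_worker(&demo_worker);
	kthread_stop(demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");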
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 5c53a05c..6710cd2 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1049,6 +1049,8 @@
 	if (on) {
 		switch (flag) {
 		case KGSL_PWRFLAGS_CLK_ON:
+			/* make sure pwrrail is ON before enabling clocks */
+			kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
 			kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON,
 				KGSL_STATE_ACTIVE);
 			break;
@@ -1854,7 +1856,12 @@
 
 	if (kgsl_gmu_isenabled(device))
 		return 0;
-	if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
+	/*
+	 * Disabling the regulator means also disabling dependent clocks.
+	 * Hence don't disable it if force clock ON is set.
+	 */
+	if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags) ||
+		test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
 		return 0;
 
 	if (state == KGSL_PWRFLAGS_OFF) {
@@ -2362,9 +2369,24 @@
 		   || device->state ==  KGSL_STATE_NAP) {
 
 		if (!atomic_read(&device->active_cnt)) {
+			spin_lock(&device->submit_lock);
+			if (device->submit_now) {
+				spin_unlock(&device->submit_lock);
+				goto done;
+			}
+			/* Don't allow GPU inline submission in SLUMBER */
+			if (requested_state == KGSL_STATE_SLUMBER)
+				device->slumber = true;
+			spin_unlock(&device->submit_lock);
+
 			ret = kgsl_pwrctrl_change_state(device,
 					device->requested_state);
 			if (ret == -EBUSY) {
+				if (requested_state == KGSL_STATE_SLUMBER) {
+					spin_lock(&device->submit_lock);
+					device->slumber = false;
+					spin_unlock(&device->submit_lock);
+				}
 				/*
 				 * If the GPU is currently busy, restore
 				 * the requested state and reschedule
@@ -2375,7 +2397,7 @@
 				kgsl_schedule_work(&device->idle_check_ws);
 			}
 		}
-
+done:
 		if (!ret)
 			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
 
@@ -2835,6 +2857,13 @@
 	trace_kgsl_pwr_set_state(device, state);
 	device->state = state;
 	device->requested_state = KGSL_STATE_NONE;
+
+	spin_lock(&device->submit_lock);
+	if (state == KGSL_STATE_SLUMBER || state == KGSL_STATE_SUSPEND)
+		device->slumber = true;
+	else
+		device->slumber = false;
+	spin_unlock(&device->submit_lock);
 }
 
 static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
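To make the intent of the new submit_lock/submit_now/slumber plumbing above easier to follow, here is a condensed sketch of the handshake between the submission path and the idle/slumber path. It keeps only the fields involved and is not the driver code.

#include <linux/spinlock.h>
#include <linux/types.h>

struct gpu_dev {
	spinlock_t submit_lock;
	unsigned int submit_now;	/* in-flight inline submissions */
	bool slumber;			/* true once SLUMBER is committed */
};

/* Submission side: bail out (and reschedule) if the GPU is slumbering. */
static bool submit_begin(struct gpu_dev *d)
{
	bool ok;

	spin_lock(&d->submit_lock);
	ok = !d->slumber;
	if (ok)
		d->submit_now++;
	spin_unlock(&d->submit_lock);
	return ok;
}

static void submit_end(struct gpu_dev *d)
{
	spin_lock(&d->submit_lock);
	d->submit_now--;
	spin_unlock(&d->submit_lock);
}

/* Idle side: refuse to enter SLUMBER while submissions are in flight. */
static bool try_enter_slumber(struct gpu_dev *d)
{
	bool ok;

	spin_lock(&d->submit_lock);
	ok = (d->submit_now == 0);
	if (ok)
		d->slumber = true;
	spin_unlock(&d->submit_lock);
	return ok;
}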
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index f0f202b..5061f6a 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -741,6 +741,8 @@
 	 */
 
 	memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
+	memdesc->page_count = 0;
+	memdesc->size = 0;
 
 	if (memdesc->pages == NULL) {
 		ret = -ENOMEM;
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index c8f2702e..159512c 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -211,6 +211,7 @@
 	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
 		spin_lock(&drvdata->spinlock);
 		stm_disable_hw(drvdata);
+		drvdata->enable = false;
 		spin_unlock(&drvdata->spinlock);
 
 		/* Wait until the engine has completely stopped */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d6941ea..85fe87f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -171,8 +171,11 @@
 	if (!used)
 		kfree(buf);
 
-	if (!ret)
+	if (!ret) {
+		coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+		coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
 		dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+	}
 
 	return ret;
 }
@@ -244,6 +247,9 @@
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+	coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+	coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
+
 	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
 }
 
@@ -521,11 +527,13 @@
 		goto out;
 	}
 
-	/* There is no point in reading a TMC in HW FIFO mode */
-	mode = readl_relaxed(drvdata->base + TMC_MODE);
-	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
-		ret = -EINVAL;
-		goto out;
+	if (drvdata->enable) {
+		/* There is no point in reading a TMC in HW FIFO mode */
+		mode = readl_relaxed(drvdata->base + TMC_MODE);
+		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+			ret = -EINVAL;
+			goto out;
+		}
 	}
 
 	val = local_read(&drvdata->mode);
@@ -565,11 +573,13 @@
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
-	/* There is no point in reading a TMC in HW FIFO mode */
-	mode = readl_relaxed(drvdata->base + TMC_MODE);
-	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
-		spin_unlock_irqrestore(&drvdata->spinlock, flags);
-		return -EINVAL;
+	if (drvdata->enable) {
+		/* There is no point in reading a TMC in HW FIFO mode */
+		mode = readl_relaxed(drvdata->base + TMC_MODE);
+		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+			spin_unlock_irqrestore(&drvdata->spinlock, flags);
+			return -EINVAL;
+		}
 	}
 
 	/* Re-enable the TMC if need be */
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 9e6f443..3234928 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -800,6 +800,8 @@
 			mutex_unlock(&drvdata->mem_lock);
 			return ret;
 		}
+		coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+		coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
 	} else {
 		drvdata->usbch = usb_qdss_open("qdss", drvdata,
 					       usb_notifier);
@@ -891,6 +893,7 @@
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
+	mutex_lock(&drvdata->mem_lock);
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	if (drvdata->reading) {
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -911,6 +914,11 @@
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
+		coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+		coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+	}
+	mutex_unlock(&drvdata->mem_lock);
 	dev_info(drvdata->dev, "TMC-ETR disabled\n");
 }
 
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 98fcd01..b97ebb8 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -361,6 +361,9 @@
 		drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+		coresight_cti_map_trigout(drvdata->cti_flush, 3, 0);
+		coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+
 		tmc_etr_bam_disable(drvdata);
 		usb_qdss_close(drvdata->usbch);
 	} else if (!strcmp(str, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB])) {
@@ -381,6 +384,9 @@
 		drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+		coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+		coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+
 		drvdata->usbch = usb_qdss_open("qdss", drvdata,
 					       usb_notifier);
 		if (IS_ERR(drvdata->usbch)) {
@@ -503,6 +509,7 @@
 	struct resource *res = &adev->res;
 	struct coresight_desc desc = { 0 };
 	struct device_node *np = adev->dev.of_node;
+	struct coresight_cti_data *ctidata;
 
 	pdata = of_get_coresight_platform_data(dev, np);
 	if (IS_ERR(pdata)) {
@@ -554,6 +561,19 @@
 
 	pm_runtime_put(&adev->dev);
 
+	ctidata = of_get_coresight_cti_data(dev, adev->dev.of_node);
+	if (IS_ERR(ctidata)) {
+		dev_err(dev, "invalid cti data\n");
+	} else if (ctidata && ctidata->nr_ctis == 2) {
+		drvdata->cti_flush = coresight_cti_get(ctidata->names[0]);
+		if (IS_ERR(drvdata->cti_flush))
+			dev_err(dev, "failed to get flush cti\n");
+
+		drvdata->cti_reset = coresight_cti_get(ctidata->names[1]);
+		if (IS_ERR(drvdata->cti_reset))
+			dev_err(dev, "failed to get reset cti\n");
+	}
+
 	desc.pdata = pdata;
 	desc.dev = dev;
 	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index a9de0e8..6643adc 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -25,6 +25,7 @@
 #include <linux/amba/bus.h>
 #include <linux/usb_bam.h>
 #include <linux/usb/usb_qdss.h>
+#include <linux/coresight-cti.h>
 
 #define TMC_RSZ			0x004
 #define TMC_STS			0x00c
@@ -184,7 +185,8 @@
 	struct tmc_etr_bam_data	*bamdata;
 	bool			enable_to_bam;
 	bool			sticky_enable;
-
+	struct coresight_cti	*cti_flush;
+	struct coresight_cti	*cti_reset;
 };
 
 /* Generic functions */
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 2492f90..81bbd78 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
 #include <linux/coresight.h>
 #include <linux/cpumask.h>
 #include <asm/smp_plat.h>
-
+#include <linux/coresight-cti.h>
 
 static int of_dev_node_match(struct device *dev, void *data)
 {
@@ -196,3 +196,45 @@
 	return pdata;
 }
 EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
+
+struct coresight_cti_data *of_get_coresight_cti_data(
+				struct device *dev, struct device_node *node)
+{
+	int i, ret;
+	uint32_t ctis_len;
+	struct device_node *child_node;
+	struct coresight_cti_data *ctidata;
+
+	ctidata = devm_kzalloc(dev, sizeof(*ctidata), GFP_KERNEL);
+	if (!ctidata)
+		return ERR_PTR(-ENOMEM);
+
+	if (of_get_property(node, "coresight-ctis", &ctis_len))
+		ctidata->nr_ctis = ctis_len/sizeof(uint32_t);
+	else
+		return ERR_PTR(-EINVAL);
+
+	if (ctidata->nr_ctis) {
+		ctidata->names = devm_kzalloc(dev, ctidata->nr_ctis *
+					      sizeof(*ctidata->names),
+					      GFP_KERNEL);
+		if (!ctidata->names)
+			return ERR_PTR(-ENOMEM);
+
+		for (i = 0; i < ctidata->nr_ctis; i++) {
+			child_node = of_parse_phandle(node, "coresight-ctis",
+						      i);
+			if (!child_node)
+				return ERR_PTR(-EINVAL);
+
+			ret = of_property_read_string(child_node,
+						      "coresight-name",
+						      &ctidata->names[i]);
+			of_node_put(child_node);
+			if (ret)
+				return ERR_PTR(ret);
+		}
+	}
+	return ctidata;
+}
+EXPORT_SYMBOL(of_get_coresight_cti_data);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 8fd108d..63e82f8 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -518,6 +518,11 @@
 	struct dst_entry *dst;
 	int ret;
 
+	if (!addr->net) {
+		pr_warn_ratelimited("%s: missing namespace\n", __func__);
+		return -EINVAL;
+	}
+
 	if (src_in->sa_family == AF_INET) {
 		struct rtable *rt = NULL;
 		const struct sockaddr_in *dst_in4 =
@@ -555,7 +560,6 @@
 	}
 
 	addr->bound_dev_if = ndev->ifindex;
-	addr->net = dev_net(ndev);
 	dev_put(ndev);
 
 	return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f2d40c0..809a028 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -976,6 +976,8 @@
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
+		qp_attr->port_num = id_priv->id.port_num;
+		*qp_attr_mask |= IB_QP_PORT;
 	} else
 		ret = -ENOSYS;
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1fb31a4..0a260a0 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1823,7 +1823,7 @@
 	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
-		if (unlikely(i > mr->max_descs))
+		if (unlikely(i >= mr->max_descs))
 			break;
 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 140f3f3..e46e2b0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -83,6 +83,7 @@
 static struct iscsi_transport iscsi_iser_transport;
 static struct scsi_transport_template *iscsi_iser_scsi_transport;
 static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
 struct iser_global ig;
 
 int iser_debug_level = 0;
@@ -550,12 +551,14 @@
 	 */
 	if (iser_conn) {
 		mutex_lock(&iser_conn->state_mutex);
+		mutex_lock(&unbind_iser_conn_mutex);
 		iser_conn_terminate(iser_conn);
 		iscsi_conn_stop(cls_conn, flag);
 
 		/* unbind */
 		iser_conn->iscsi_conn = NULL;
 		conn->dd_data = NULL;
+		mutex_unlock(&unbind_iser_conn_mutex);
 
 		complete(&iser_conn->stop_completion);
 		mutex_unlock(&iser_conn->state_mutex);
@@ -973,13 +976,21 @@
 	struct iser_conn *iser_conn;
 	struct ib_device *ib_dev;
 
+	mutex_lock(&unbind_iser_conn_mutex);
+
 	session = starget_to_session(scsi_target(sdev))->dd_data;
 	iser_conn = session->leadconn->dd_data;
+	if (!iser_conn) {
+		mutex_unlock(&unbind_iser_conn_mutex);
+		return -ENOTCONN;
+	}
 	ib_dev = iser_conn->ib_conn.device->ib_device;
 
 	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
 		blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
 
+	mutex_unlock(&unbind_iser_conn_mutex);
+
 	return 0;
 }
 
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6dd43f6..39d2837 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1447,7 +1447,7 @@
 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct isert_conn *isert_conn = wc->qp->qp_context;
-	struct ib_device *ib_dev = isert_conn->cm_id->device;
+	struct ib_device *ib_dev = isert_conn->device->ib_device;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		isert_print_wc(wc, "login recv");
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
index a5ea27a..c5ab3dd 100644
--- a/drivers/input/misc/keychord.c
+++ b/drivers/input/misc/keychord.c
@@ -232,9 +232,11 @@
 {
 	struct keychord_device *kdev = file->private_data;
 	struct input_keychord *keychords = 0;
-	struct input_keychord *keychord, *next, *end;
+	struct input_keychord *keychord;
 	int ret, i, key;
 	unsigned long flags;
+	size_t resid = count;
+	size_t key_bytes;
 
 	if (count < sizeof(struct input_keychord))
 		return -EINVAL;
@@ -265,15 +267,29 @@
 	kdev->head = kdev->tail = 0;
 
 	keychord = keychords;
-	end = (struct input_keychord *)((char *)keychord + count);
 
-	while (keychord < end) {
-		next = NEXT_KEYCHORD(keychord);
-		if (keychord->count <= 0 || next > end) {
+	while (resid > 0) {
+		/* Is the entire keychord entry header present ? */
+		if (resid < sizeof(struct input_keychord)) {
+			pr_err("keychord: Insufficient bytes present for header %zu\n",
+			       resid);
+			goto err_unlock_return;
+		}
+		resid -= sizeof(struct input_keychord);
+		if (keychord->count <= 0) {
 			pr_err("keychord: invalid keycode count %d\n",
 				keychord->count);
 			goto err_unlock_return;
 		}
+		key_bytes = keychord->count * sizeof(keychord->keycodes[0]);
+		/* Do we have all the expected keycodes ? */
+		if (resid < key_bytes) {
+			pr_err("keychord: Insufficient bytes present for keycount %zu\n",
+			       resid);
+			goto err_unlock_return;
+		}
+		resid -= key_bytes;
+
 		if (keychord->version != KEYCHORD_VERSION) {
 			pr_err("keychord: unsupported version %d\n",
 				keychord->version);
@@ -292,7 +308,7 @@
 		}
 
 		kdev->keychord_count++;
-		keychord = next;
+		keychord = NEXT_KEYCHORD(keychord);
 	}
 
 	kdev->keychords = keychords;
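The keychord rewrite above replaces pointer-arithmetic bounds checks with explicit residual-byte accounting. The same idea, reduced to a standalone validator for a stream of variable-length records (a fixed header followed by `count` 16-bit keycodes); the record layout mirrors struct input_keychord but is a local illustration, not the driver's definition:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct record_hdr {
	uint16_t version;
	uint16_t id;
	uint16_t count;			/* number of keycodes that follow */
	uint16_t keycodes[];
};

/* Returns true only if every record fits entirely inside buf[0..len). */
static bool records_are_well_formed(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t resid = len;

	while (resid > 0) {
		const struct record_hdr *hdr;
		size_t key_bytes;

		if (resid < sizeof(*hdr))
			return false;		/* truncated header */
		hdr = (const struct record_hdr *)p;
		resid -= sizeof(*hdr);

		if (hdr->count == 0)
			return false;		/* nonsensical record */

		key_bytes = (size_t)hdr->count * sizeof(hdr->keycodes[0]);
		if (resid < key_bytes)
			return false;		/* truncated keycode array */
		resid -= key_bytes;

		p += sizeof(*hdr) + key_bytes;
	}
	return true;
}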
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 89abfdb..c84c685 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -434,8 +434,10 @@
 {
 	struct i8042_port *port = serio->port_data;
 
+	spin_lock_irq(&i8042_lock);
 	port->exists = true;
-	mb();
+	spin_unlock_irq(&i8042_lock);
+
 	return 0;
 }
 
@@ -448,16 +450,20 @@
 {
 	struct i8042_port *port = serio->port_data;
 
+	spin_lock_irq(&i8042_lock);
 	port->exists = false;
+	port->serio = NULL;
+	spin_unlock_irq(&i8042_lock);
 
 	/*
+	 * We need to make sure that the interrupt handler finishes using
+	 * our serio port before we return from this function.
 	 * We synchronize with both AUX and KBD IRQs because there is
 	 * a (very unlikely) chance that AUX IRQ is raised for KBD port
 	 * and vice versa.
 	 */
 	synchronize_irq(I8042_AUX_IRQ);
 	synchronize_irq(I8042_KBD_IRQ);
-	port->serio = NULL;
 }
 
 /*
@@ -574,7 +580,7 @@
 
 	spin_unlock_irqrestore(&i8042_lock, flags);
 
-	if (likely(port->exists && !filtered))
+	if (likely(serio && !filtered))
 		serio_interrupt(serio, data, dfl);
 
  out:
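The i8042 change above replaces a bare memory barrier with the standard teardown ordering for data shared with an interrupt handler: clear the pointer under the same lock the handler takes, then synchronize_irq() so a handler that is already running finishes before the object goes away. Sketched generically below; the structure and IRQ parameter are placeholders, not the i8042 types.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct port {
	spinlock_t lock;		/* also taken by the IRQ handler */
	bool exists;
	void *consumer;			/* e.g. a serio device */
};

static void port_stop(struct port *p, unsigned int irq)
{
	/* 1. Make newly arriving interrupts see the port as gone. */
	spin_lock_irq(&p->lock);
	p->exists = false;
	p->consumer = NULL;
	spin_unlock_irq(&p->lock);

	/* 2. Wait for a handler that may already be past the check. */
	synchronize_irq(irq);

	/* Only now is it safe to free whatever 'consumer' pointed at. */
}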
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index daccf64..779001e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -647,6 +647,9 @@
 	int enabled;
 	u64 val;
 
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
 	if (gic_irq_in_rdist(d))
 		return -EINVAL;
 
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 2678a00..5bd52e4 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -158,6 +158,11 @@
 #define	FLASH_LED_DISABLE			0x00
 #define	FLASH_LED_SAFETY_TMR_DISABLED		0x13
 #define	FLASH_LED_MAX_TOTAL_CURRENT_MA		3750
+#define	FLASH_LED_IRES5P0_MAX_CURR_MA		640
+#define	FLASH_LED_IRES7P5_MAX_CURR_MA		960
+#define	FLASH_LED_IRES10P0_MAX_CURR_MA		1280
+#define	FLASH_LED_IRES12P5_MAX_CURR_MA		1600
+#define	MAX_IRES_LEVELS				4
 
 /* notifier call chain for flash-led irqs */
 static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
@@ -196,13 +201,15 @@
 	struct pinctrl_state		*hw_strobe_state_suspend;
 	int				hw_strobe_gpio;
 	int				ires_ua;
+	int				default_ires_ua;
 	int				max_current;
 	int				current_ma;
 	int				prev_current_ma;
 	u8				duration;
 	u8				id;
 	u8				type;
-	u8				ires;
+	u8				ires_idx;
+	u8				default_ires_idx;
 	u8				hdrm_val;
 	u8				current_reg_val;
 	u8				strobe_ctrl;
@@ -305,6 +312,11 @@
 	125, 119, 113, 107, 149, 143, 137, 131,
 };
 
+static int max_ires_curr_ma_table[MAX_IRES_LEVELS] = {
+	FLASH_LED_IRES12P5_MAX_CURR_MA, FLASH_LED_IRES10P0_MAX_CURR_MA,
+	FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA
+};
+
 static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data)
 {
 	int rc;
@@ -935,6 +947,7 @@
 
 static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
 {
+	int i = 0;
 	int prgm_current_ma = value;
 	int min_ma = fnode->ires_ua / 1000;
 	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
@@ -944,7 +957,22 @@
 	else if (value < min_ma)
 		prgm_current_ma = min_ma;
 
+	fnode->ires_idx = fnode->default_ires_idx;
+	fnode->ires_ua = fnode->default_ires_ua;
+
 	prgm_current_ma = min(prgm_current_ma, fnode->max_current);
+	if (prgm_current_ma > max_ires_curr_ma_table[fnode->ires_idx]) {
+		/* find the matching ires */
+		for (i = MAX_IRES_LEVELS - 1; i >= 0; i--) {
+			if (prgm_current_ma <= max_ires_curr_ma_table[i]) {
+				fnode->ires_idx = i;
+				fnode->ires_ua = FLASH_LED_IRES_MIN_UA +
+				      (FLASH_LED_IRES_BASE - fnode->ires_idx) *
+				      FLASH_LED_IRES_DIVISOR;
+				break;
+			}
+		}
+	}
 	fnode->current_ma = prgm_current_ma;
 	fnode->cdev.brightness = prgm_current_ma;
 	fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
@@ -1062,7 +1090,7 @@
 	val = 0;
 	for (i = 0; i < led->num_fnodes; i++)
 		if (snode->led_mask & BIT(led->fnode[i].id))
-			val |= led->fnode[i].ires << (led->fnode[i].id * 2);
+			val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2);
 
 	rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base),
 						FLASH_LED_CURRENT_MASK, val);
@@ -1434,13 +1462,14 @@
 		return rc;
 	}
 
-	fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
-	fnode->ires = FLASH_LED_IRES_DEFAULT_VAL;
+	fnode->default_ires_ua = fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA;
+	fnode->default_ires_idx = fnode->ires_idx = FLASH_LED_IRES_DEFAULT_VAL;
 	rc = of_property_read_u32(node, "qcom,ires-ua", &val);
 	if (!rc) {
-		fnode->ires_ua = val;
-		fnode->ires = FLASH_LED_IRES_BASE -
-			(val - FLASH_LED_IRES_MIN_UA) / FLASH_LED_IRES_DIVISOR;
+		fnode->default_ires_ua = fnode->ires_ua = val;
+		fnode->default_ires_idx = fnode->ires_idx =
+			FLASH_LED_IRES_BASE - (val - FLASH_LED_IRES_MIN_UA) /
+			FLASH_LED_IRES_DIVISOR;
 	} else if (rc != -EINVAL) {
 		pr_err("Unable to read current resolution rc=%d\n", rc);
 		return rc;
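The IRES rework above picks the coarsest current resolution that can still reach the requested LED current. The per-index maxima (1600/1280/960/640 mA) come straight from the hunk; treating them as 12.5/10.0/7.5/5.0 mA per step over 128 steps is an assumption that is consistent with, but not fully visible in, this diff. The selection then reduces to the sketch below:

#include <stdio.h>

/* Assumed resolution table: index 0 = 12.5 mA/step ... index 3 = 5.0 mA/step,
 * matching max_ires_curr_ma_table[] in the hunk above.
 */
static const int max_curr_ma[4] = { 1600, 1280, 960, 640 };

static int pick_ires_idx(int req_ma, int default_idx)
{
	int i;

	if (req_ma <= max_curr_ma[default_idx])
		return default_idx;

	/* walk from the finest resolution up until the request fits */
	for (i = 3; i >= 0; i--)
		if (req_ma <= max_curr_ma[i])
			return i;

	return 0;	/* clamp to the coarsest resolution */
}

int main(void)
{
	/* default 5.0 mA/step (idx 3, max 640 mA), request 1000 mA -> idx 1 */
	printf("selected ires index: %d\n", pick_ires_idx(1000, 3));
	return 0;
}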
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index c50fc0e..a1e0908 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -11,12 +11,13 @@
  *
  */
 
-#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
 
 #include <linux/atomic.h>
 #include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -40,6 +41,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/rpmh.h>
 
+#define TCS_DRV_IPC_LOG_SIZE		2
+
 #define MAX_CMDS_PER_TCS		16
 #define MAX_TCS_PER_TYPE		3
 #define MAX_TCS_SLOTS			(MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
@@ -140,8 +143,35 @@
 	u64 tcs_last_recv_ts[MAX_POOL_SIZE];
 	atomic_t tcs_send_count[MAX_POOL_SIZE];
 	atomic_t tcs_irq_count[MAX_POOL_SIZE];
+	void *ipc_log_ctx;
 };
 
+/* Log to IPC and Ftrace */
+#define log_send_msg(drv, m, n, i, a, d, c, t) do {			\
+	trace_rpmh_send_msg(drv->name, m, n, i, a, d, c, t);		\
+	ipc_log_string(drv->ipc_log_ctx,				\
+		"send msg: m=%d n=%d msgid=0x%x addr=0x%x data=0x%x cmpl=%d trigger=%d", \
+		m, n, i, a, d, c, t);					\
+	} while (0)
+
+#define log_rpmh_notify_irq(drv, m, a, e) do {				\
+	trace_rpmh_notify_irq(drv->name, m, a, e);			\
+	ipc_log_string(drv->ipc_log_ctx,				\
+		"irq response: m=%d addr=0x%x err=%d", m, a, e);	\
+	} while (0)
+
+#define log_rpmh_control_msg(drv, d) do {				\
+	trace_rpmh_control_msg(drv->name, d);				\
+	ipc_log_string(drv->ipc_log_ctx, "ctrlr msg: data=0x%x", d);	\
+	} while (0)
+
+#define log_rpmh_notify(drv, m, a, e) do {				\
+	trace_rpmh_notify(drv->name, m, a, e);				\
+	ipc_log_string(drv->ipc_log_ctx,				\
+		"tx done: m=%d addr=0x%x err=%d", m, a, e);		\
+	} while (0)
+
+
 static int tcs_response_pool_init(struct tcs_drv *drv)
 {
 	struct tcs_response_pool *pool;
@@ -223,7 +253,6 @@
 			break;
 		}
 		pos++;
-		udelay(1);
 	} while (1);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
@@ -241,7 +270,7 @@
 		return;
 
 	msg = resp->msg;
-	pr_warn("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+	pr_warn("Response object [idx=%d for-tcs=%d in-use=%d]\n",
 			resp->idx, resp->m, resp->in_use);
 	pr_warn("Msg: state=%d\n", msg->state);
 	for (i = 0; i < msg->num_payload; i++)
@@ -425,8 +454,10 @@
 			sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
 			if ((!(sts & CMD_STATUS_ISSUED)) ||
 				((resp->msg->is_complete || cmd->complete) &&
-				(!(sts & CMD_STATUS_COMPL))))
+				(!(sts & CMD_STATUS_COMPL)))) {
 				resp->err = -EIO;
+				break;
+			}
 		}
 
 		/* Check for response if this was a read request */
@@ -437,7 +468,7 @@
 			mbox_chan_received_data(resp->chan, resp->msg);
 		}
 
-		trace_rpmh_notify_irq(drv->name, m, resp->msg->payload[0].addr,
+		log_rpmh_notify_irq(drv, m, resp->msg->payload[0].addr,
 						resp->err);
 
 		/* Clear the AMC mode for non-ACTIVE TCSes */
@@ -480,7 +511,7 @@
 {
 	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
 
-	trace_rpmh_notify(drv->name, m, msg->payload[0].addr, err);
+	log_rpmh_notify(drv, m, msg->payload[0].addr, err);
 	mbox_chan_txdone(chan, err);
 }
 
@@ -546,7 +577,7 @@
 		write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, msgid);
 		write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
 		write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
-		trace_rpmh_send_msg(drv->name, m, n + i, msgid, cmd->addr,
+		log_send_msg(drv, m, n + i, msgid, cmd->addr,
 					cmd->data, cmd->complete, trigger);
 	}
 
@@ -667,7 +698,8 @@
 	int n = 0;
 
 	/* For active requests find the first free AMC. */
-	if (tcs->type == ACTIVE_TCS)
+	if (msg->state == RPMH_ACTIVE_ONLY_STATE ||
+			msg->state == RPMH_AWAKE_STATE)
 		return find_free_tcs(tcs);
 
 	/* Find if we already have the msg in our TCS */
@@ -780,6 +812,10 @@
 		spin_lock_irqsave(&tcs->tcs_lock, flags);
 		for (i = 0; i < tcs->num_tcs; i++) {
 			m = i + tcs->tcs_offset;
+			if (!tcs_is_free(drv, m)) {
+				spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+				return -EBUSY;
+			}
 			__tcs_buffer_invalidate(drv->reg_base, m);
 		}
 		/* Mark the TCS as free */
@@ -795,7 +831,7 @@
 	int n;
 	struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
 	void __iomem *base = drv->reg_base;
-	u32 enable, addr, data, msgid;
+	u32 enable, addr, data, msgid, sts, irq_sts;
 
 	if (!tcs || tcs_is_free(drv, m))
 		return;
@@ -804,15 +840,24 @@
 	if (!enable)
 		return;
 
-	pr_warn("TCS-%d contents:\n", m);
+	pr_warn("RSC:%s\n", drv->name);
+
+	sts = read_tcs_reg(base, TCS_DRV_STATUS, m, 0);
+	data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
+	irq_sts = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);
+	pr_warn("TCS=%d [ctrlr-sts:%s amc-mode:0x%x irq-sts:%s]\n",
+			m, sts ? "IDLE" : "BUSY", data,
+			(irq_sts & BIT(m)) ? "COMPLETED" : "PENDING");
+
 	for (n = 0; n < tcs->ncpt; n++) {
 		if (!(enable & BIT(n)))
 			continue;
 		addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n);
 		data = read_tcs_reg(base, TCS_DRV_CMD_DATA, m, n);
 		msgid = read_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n);
-		pr_warn("\tn=%d addr=0x%x data=0x%x hdr=0x%x\n",
-						n, addr, data, msgid);
+		sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, n);
+		pr_warn("\tCMD=%d [addr=0x%x data=0x%x hdr=0x%x sts=0x%x]\n",
+						n, addr, data, msgid, sts);
 	}
 }
 
@@ -866,7 +911,8 @@
 		goto tx_fail;
 	}
 
-	if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
+	if (!msg->payload || !msg->num_payload ||
+			msg->num_payload > MAX_RPMH_PAYLOAD) {
 		dev_err(dev, "Payload error\n");
 		ret = -EINVAL;
 		goto tx_fail;
@@ -896,8 +942,11 @@
 	 * Since we are re-purposing the wake TCS, invalidate previous
 	 * contents to avoid confusion.
 	 */
-	if (msg->state == RPMH_AWAKE_STATE)
-		tcs_mbox_invalidate(chan);
+	if (msg->state == RPMH_AWAKE_STATE) {
+		ret = tcs_mbox_invalidate(chan);
+		if (ret)
+			goto tx_fail;
+	}
 
 	/* Post the message to the TCS and trigger */
 	ret = tcs_mbox_write(chan, msg, true);
@@ -909,8 +958,10 @@
 				drv, msg, chan, TCS_M_INIT, ret);
 
 		dev_err(dev, "Error sending RPMH message %d\n", ret);
-		if (resp)
+		if (!IS_ERR(resp))
 			send_tcs_response(resp);
+		else
+			dev_err(dev, "No response object %ld\n", PTR_ERR(resp));
 		ret = 0;
 	}
 
@@ -932,7 +983,7 @@
 	for (i = 0; i < msg->num_payload; i++) {
 		/* Only data is write capable */
 		writel_relaxed(cpu_to_le32(msg->payload[i].data), addr);
-		trace_rpmh_control_msg(drv->name, msg->payload[i].data);
+		log_rpmh_control_msg(drv, msg->payload[i].data);
 		addr += TCS_HIDDEN_CMD_SHIFT;
 	}
 }
@@ -977,7 +1028,8 @@
 		goto tx_done;
 	}
 
-	if (msg->num_payload > MAX_RPMH_PAYLOAD) {
+	if (!msg->payload || (!msg->num_payload && !msg->invalidate) ||
+			msg->num_payload > MAX_RPMH_PAYLOAD) {
 		dev_err(dev, "Payload error\n");
 		goto tx_done;
 	}
@@ -1117,7 +1169,8 @@
 		if (tcs->num_tcs > MAX_TCS_PER_TYPE)
 			return -EINVAL;
 
-		if (st > max_tcs)
+		if (st + tcs->num_tcs > max_tcs &&
+				st + tcs->num_tcs >= sizeof(tcs->tcs_mask))
 			return -EINVAL;
 
 		tcs->tcs_mask = ((1 << tcs->num_tcs) - 1) << st;
@@ -1139,10 +1192,12 @@
 		for (j = 0; j < i; j++) {
 			ret = of_parse_phandle_with_args(np, "mboxes",
 							"#mbox-cells", j, &p);
-			if (!ret && p.np == pdev->dev.of_node)
+			of_node_put(p.np);
+			if (!ret && p.np == pdev->dev.of_node) {
+				num_chans++;
 				break;
+			}
 		}
-		num_chans++;
 	}
 
 	if (!num_chans) {
@@ -1200,6 +1255,9 @@
 	for (i = 0; i < ARRAY_SIZE(drv->tcs_in_use); i++)
 		atomic_set(&drv->tcs_in_use[i], 0);
 
+	drv->ipc_log_ctx = ipc_log_context_create(TCS_DRV_IPC_LOG_SIZE,
+						drv->name, 0);
+
 	ret = mbox_controller_register(&drv->mbox);
 	if (ret)
 		return ret;
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index 3b19017..eb4bdf6 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -646,6 +646,8 @@
         android_verity_target.direct_access = dm_linear_direct_access,
 	android_verity_target.io_hints = NULL;
 
+	set_disk_ro(dm_disk(dm_table_get_md(ti->table)), 0);
+
 	err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args);
 
 	if (!err) {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index ac8235b..0d437c9 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -431,7 +431,7 @@
 	unsigned long flags;
 	struct priority_group *pg;
 	struct pgpath *pgpath;
-	bool bypassed = true;
+	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -470,7 +470,7 @@
 	 */
 	do {
 		list_for_each_entry(pg, &m->priority_groups, list) {
-			if (pg->bypassed == bypassed)
+			if (pg->bypassed == !!bypassed)
 				continue;
 			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 			if (!IS_ERR_OR_NULL(pgpath)) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29e2df5..81a7875 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1073,7 +1073,7 @@
 		 */
 		DEFINE_WAIT(w);
 		for (;;) {
-			flush_signals(current);
+			sigset_t full, old;
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_INTERRUPTIBLE);
 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
@@ -1082,7 +1082,10 @@
 			     !md_cluster_ops->area_resyncing(mddev, WRITE,
 				     bio->bi_iter.bi_sector, bio_end_sector(bio))))
 				break;
+			sigfillset(&full);
+			sigprocmask(SIG_BLOCK, &full, &old);
 			schedule();
+			sigprocmask(SIG_SETMASK, &old, NULL);
 		}
 		finish_wait(&conf->wait_barrier, &w);
 	}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f34ad2b..8f117d6 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5300,12 +5300,15 @@
 				 * userspace, we want an interruptible
 				 * wait.
 				 */
-				flush_signals(current);
 				prepare_to_wait(&conf->wait_for_overlap,
 						&w, TASK_INTERRUPTIBLE);
 				if (logical_sector >= mddev->suspend_lo &&
 				    logical_sector < mddev->suspend_hi) {
+					sigset_t full, old;
+					sigfillset(&full);
+					sigprocmask(SIG_BLOCK, &full, &old);
 					schedule();
+					sigprocmask(SIG_SETMASK, &old, NULL);
 					do_prepare = true;
 				}
 				goto retry;
@@ -7557,12 +7560,10 @@
 {
 
 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-		struct md_rdev *rdev;
 
 		spin_lock_irq(&conf->device_lock);
 		conf->previous_raid_disks = conf->raid_disks;
-		rdev_for_each(rdev, conf->mddev)
-			rdev->data_offset = rdev->new_data_offset;
+		md_finish_reshape(conf->mddev);
 		smp_wmb();
 		conf->reshape_progress = MaxSector;
 		conf->mddev->reshape_position = MaxSector;
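The raid1/raid5 hunks above drop flush_signals() and instead block all signals across the schedule(), so a pending signal is kept pending rather than discarded while the thread waits for the array region to become writable. The general pattern, hedged as a sketch of kernel wait code rather than of md itself:

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/wait.h>

/* Wait for cond() without discarding any pending signal (sketch only). */
static void wait_without_eating_signals(wait_queue_head_t *wq,
					bool (*cond)(void))
{
	DEFINE_WAIT(w);

	for (;;) {
		sigset_t full, old;

		prepare_to_wait(wq, &w, TASK_INTERRUPTIBLE);
		if (cond())
			break;
		/* Block everything so schedule() really sleeps, then restore;
		 * the signal stays pending instead of being flushed away.
		 */
		sigfillset(&full);
		sigprocmask(SIG_BLOCK, &full, &old);
		schedule();
		sigprocmask(SIG_SETMASK, &old, NULL);
	}
	finish_wait(wq, &w);
}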
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 8f2556e..61611d1 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3691,7 +3691,14 @@
 	core->nr = nr;
 	sprintf(core->name, "cx88[%d]", core->nr);
 
-	core->tvnorm = V4L2_STD_NTSC_M;
+	/*
+	 * Note: Setting initial standard here would cause first call to
+	 * cx88_set_tvnorm() to return without programming any registers.  Leave
+	 * it blank at this point and it will get set later in
+	 * cx8800_initdev()
+	 */
+	core->tvnorm  = 0;
+
 	core->width   = 320;
 	core->height  = 240;
 	core->field   = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index d83eb3b..3b140ad 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1422,7 +1422,7 @@
 
 	/* initial device configuration */
 	mutex_lock(&core->lock);
-	cx88_set_tvnorm(core, core->tvnorm);
+	cx88_set_tvnorm(core, V4L2_STD_NTSC_M);
 	v4l2_ctrl_handler_setup(&core->video_hdl);
 	v4l2_ctrl_handler_setup(&core->audio_hdl);
 	cx88_video_mux(core, 0);
diff --git a/drivers/media/platform/msm/broadcast/tspp.c b/drivers/media/platform/msm/broadcast/tspp.c
index 7717759..44193f5 100644
--- a/drivers/media/platform/msm/broadcast/tspp.c
+++ b/drivers/media/platform/msm/broadcast/tspp.c
@@ -24,6 +24,7 @@
 #include <linux/uaccess.h>       /* copy_to_user */
 #include <linux/slab.h>          /* kfree, kzalloc */
 #include <linux/ioport.h>        /* XXX_ mem_region */
+#include <asm/dma-iommu.h>
 #include <linux/dma-mapping.h>   /* dma_XXX */
 #include <linux/dmapool.h>       /* DMA pools */
 #include <linux/delay.h>         /* msleep */
@@ -59,6 +60,10 @@
 #define TSPP_NUM_KEYS                  8
 #define INVALID_CHANNEL                0xFFFFFFFF
 #define TSPP_BAM_DEFAULT_IPC_LOGLVL    2
+
+#define TSPP_SMMU_IOVA_START	(0x10000000)
+#define TSPP_SMMU_IOVA_SIZE	(0x40000000)
+
 /*
  * BAM descriptor FIFO size (in number of descriptors).
  * Max number of descriptors allowed by SPS which is 8K-1.
@@ -489,6 +494,8 @@
 	struct mutex mutex;
 	struct tspp_pinctrl pinctrl;
 	unsigned int tts_source; /* Time stamp source type LPASS timer/TCR */
+	struct dma_iommu_mapping *iommu_mapping;
+	bool bypass_s1_smmu;
 
 	struct dentry *dent;
 	struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)];
@@ -1058,6 +1065,42 @@
 	tspp_key_entry &= ~(1 << entry);
 }
 
+static int tspp_iommu_init(struct tspp_device *device)
+{
+	struct dma_iommu_mapping *iommu_map;
+	int s1_bypass = 1;
+
+	iommu_map = arm_iommu_create_mapping(&platform_bus_type,
+						TSPP_SMMU_IOVA_START,
+						TSPP_SMMU_IOVA_SIZE);
+	if (IS_ERR(iommu_map)) {
+		dev_err(&device->pdev->dev, "iommu_create_mapping failure\n");
+		return PTR_ERR(iommu_map);
+	}
+	if (iommu_domain_set_attr(iommu_map->domain,
+				  DOMAIN_ATTR_S1_BYPASS, &s1_bypass)) {
+		dev_err(&device->pdev->dev, "Can't bypass s1 translation\n");
+		arm_iommu_release_mapping(iommu_map);
+		return -EIO;
+	}
+	if (arm_iommu_attach_device(&device->pdev->dev, iommu_map)) {
+		dev_err(&device->pdev->dev, "can't arm_iommu_attach_device\n");
+		arm_iommu_release_mapping(iommu_map);
+		return -EIO;
+	}
+
+	device->iommu_mapping = iommu_map;
+	return 0;
+}
+
+static void tspp_iommu_release_iomapping(struct tspp_device *device)
+{
+	if (device->bypass_s1_smmu && device->iommu_mapping)
+		arm_iommu_release_mapping(device->iommu_mapping);
+
+	device->iommu_mapping = NULL;
+}
+
 static int tspp_alloc_buffer(u32 channel_id, struct tspp_data_descriptor *desc,
 	u32 size, struct dma_pool *dma_pool, tspp_allocator *alloc, void *user)
 {
@@ -2959,6 +3002,14 @@
 		goto err_irq;
 	device->req_irqs = false;
 
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,smmu-s1-bypass")) {
+		device->bypass_s1_smmu = true;
+		if (tspp_iommu_init(device)) {
+			dev_err(&pdev->dev, "iommu init failed\n");
+			goto err_iommu;
+		}
+	}
+
 	device->tts_source = TSIF_TTS_TCR;
 	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
 		device->tsif[i].tts_source = device->tts_source;
@@ -3029,6 +3080,8 @@
 	tspp_debugfs_exit(device);
 	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
 		tsif_debugfs_exit(&device->tsif[i]);
+err_iommu:
+	tspp_iommu_release_iomapping(device);
 err_irq:
 	iounmap(device->bam_props.virt_addr);
 err_map_bam:
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index 99bd263..800c9ea 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
index 38f13c4..03f6e0c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -27,23 +27,12 @@
 #include "cam_cpas_api.h"
 #include "cam_hw_intf.h"
 #include "cam_hw.h"
-
-#ifdef CONFIG_CAM_CDM_DBG
-#define CDM_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDM_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
-#ifdef CONFIG_CAM_CDM_DUMP_DBG
-#define CDM_DUMP_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDM_DUMP_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
+#include "cam_debug_util.h"
 
 #define CAM_MAX_SW_CDM_VERSION_SUPPORTED  1
 #define CAM_SW_CDM_INDEX                  0
 #define CAM_CDM_INFLIGHT_WORKS            5
-#define CAM_CDM_HW_RESET_TIMEOUT          3000
+#define CAM_CDM_HW_RESET_TIMEOUT          300
 
 #define CAM_CDM_HW_ID_MASK      0xF
 #define CAM_CDM_HW_ID_SHIFT     0x5
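
The header change above removes the file-local CDM_CDBG/CDM_DUMP_CDBG wrappers (whose verbosity was fixed at build time by CONFIG_CAM_CDM_DBG) in favour of the shared helpers declared in cam_debug_util.h, which take an explicit module tag. The exact macro definitions are not part of this diff; the converted call sites consistently drop the trailing "\n", which suggests the shared helpers append it. A before/after usage sketch taken from the conversions below:

/* Before: per-file macro, caller supplies the trailing newline */
CDM_CDBG("CDM client get refcount=%d\n", client->refcount);
pr_err("Refcount put when zero\n");

/* After: shared helpers tagged with the CAM_CDM module id */
CAM_DBG(CAM_CDM, "CDM client get refcount=%d", client->refcount);
CAM_ERR(CAM_CDM, "Refcount put when zero");
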
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
index 3604357..6c8bde1 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-CDM-CORE %s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -32,7 +30,7 @@
 static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
 {
 	mutex_lock(&client->lock);
-	CDM_CDBG("CDM client get refcount=%d\n",
+	CAM_DBG(CAM_CDM, "CDM client get refcount=%d",
 		client->refcount);
 	client->refcount++;
 	mutex_unlock(&client->lock);
@@ -41,12 +39,12 @@
 static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
 {
 	mutex_lock(&client->lock);
-	CDM_CDBG("CDM client put refcount=%d\n",
+	CAM_DBG(CAM_CDM, "CDM client put refcount=%d",
 		client->refcount);
 	if (client->refcount > 0) {
 		client->refcount--;
 	} else {
-		pr_err("Refcount put when zero\n");
+		CAM_ERR(CAM_CDM, "Refcount put when zero");
 		WARN_ON(1);
 	}
 	mutex_unlock(&client->lock);
@@ -63,16 +61,16 @@
 		cam_version->reserved = 0;
 		return true;
 	default:
-		pr_err("CDM Version=%x not supported in util\n", ver);
+		CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver);
 	break;
 	}
 	return false;
 }
 
-void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
 	enum cam_camnoc_irq_type evt_type, uint32_t evt_data)
 {
-	pr_err("CPAS error callback type=%d with data=%x\n", evt_type,
+	CAM_ERR(CAM_CDM, "CPAS error callback type=%d with data=%x", evt_type,
 		evt_data);
 }
 
@@ -84,13 +82,14 @@
 		case CAM_CDM170_VERSION:
 			return &CDM170_ops;
 		default:
-			pr_err("CDM Version=%x not supported in util\n", ver);
+			CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util",
+				ver);
 		}
 	} else if (cam_version) {
 		if ((cam_version->major == 1) && (cam_version->minor == 0) &&
 			(cam_version->incr == 0))
 			return &CDM170_ops;
-		pr_err("cam_hw_version=%x:%x:%x not supported\n",
+		CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported",
 			cam_version->major, cam_version->minor,
 			cam_version->incr);
 	}
@@ -107,7 +106,7 @@
 		if (node->bl_tag == tag)
 			return node;
 	}
-	pr_err("Could not find the bl request for tag=%x\n", tag);
+	CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag);
 
 	return NULL;
 }
@@ -135,11 +134,11 @@
 
 	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
 		if (hw->clients[i] == NULL) {
-			CDM_CDBG("Found client slot %d\n", i);
+			CAM_DBG(CAM_CDM, "Found client slot %d", i);
 			return i;
 		}
 	}
-	pr_err("No more client slots\n");
+	CAM_ERR(CAM_CDM, "No more client slots");
 
 	return -EBUSY;
 }
@@ -153,7 +152,7 @@
 	struct cam_cdm_client *client = NULL;
 
 	if (!cdm_hw) {
-		pr_err("CDM Notify called with NULL hw info\n");
+		CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info");
 		return;
 	}
 	core = (struct cam_cdm *)cdm_hw->core_info;
@@ -166,20 +165,21 @@
 		client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
 		client = core->clients[client_idx];
 		if ((!client) || (client->handle != node->client_hdl)) {
-			pr_err("Invalid client %pK hdl=%x\n", client,
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
 				node->client_hdl);
 			return;
 		}
 		cam_cdm_get_client_refcount(client);
 		if (client->data.cam_cdm_callback) {
-			CDM_CDBG("Calling client=%s cb cookie=%d\n",
+			CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d",
 				client->data.identifier, node->cookie);
 			client->data.cam_cdm_callback(node->client_hdl,
 				node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
 				node->cookie);
-			CDM_CDBG("Exit client cb cookie=%d\n", node->cookie);
+			CAM_DBG(CAM_CDM, "Exit client cb cookie=%d",
+				node->cookie);
 		} else {
-			pr_err("No cb registered for client hdl=%x\n",
+			CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x",
 				node->client_hdl);
 		}
 		cam_cdm_put_client_refcount(client);
@@ -190,7 +190,7 @@
 		if (core->clients[i] != NULL) {
 			client = core->clients[i];
 			mutex_lock(&client->lock);
-			CDM_CDBG("Found client slot %d\n", i);
+			CAM_DBG(CAM_CDM, "Found client slot %d", i);
 			if (client->data.cam_cdm_callback) {
 				if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
 					unsigned long iova =
@@ -203,7 +203,8 @@
 						(iova & 0xFFFFFFFF));
 				}
 			} else {
-				pr_err("No cb registered for client hdl=%x\n",
+				CAM_ERR(CAM_CDM,
+					"No cb registered for client hdl=%x",
 					client->handle);
 			}
 			mutex_unlock(&client->lock);
@@ -228,24 +229,26 @@
 	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
 	client = core->clients[client_idx];
 	if (!client) {
-		pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+		CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
 		return -EINVAL;
 	}
 	cam_cdm_get_client_refcount(client);
 	if (*handle != client->handle) {
-		pr_err("client id given handle=%x invalid\n", *handle);
+		CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
 		cam_cdm_put_client_refcount(client);
 		return -EINVAL;
 	}
 	if (operation == true) {
 		if (true == client->stream_on) {
-			pr_err("Invalid CDM client is already streamed ON\n");
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM client is already streamed ON");
 			cam_cdm_put_client_refcount(client);
 			return rc;
 		}
 	} else {
 		if (client->stream_on == false) {
-			pr_err("Invalid CDM client is already streamed Off\n");
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM client is already streamed Off");
 			cam_cdm_put_client_refcount(client);
 			return rc;
 		}
@@ -265,26 +268,28 @@
 			rc = cam_cpas_start(core->cpas_handle,
 				&ahb_vote, &axi_vote);
 			if (rc != 0) {
-				pr_err("CPAS start failed\n");
+				CAM_ERR(CAM_CDM, "CPAS start failed");
 				goto end;
 			}
-			CDM_CDBG("CDM init first time\n");
+			CAM_DBG(CAM_CDM, "CDM init first time");
 			if (core->id == CAM_CDM_VIRTUAL) {
-				CDM_CDBG("Virtual CDM HW init first time\n");
+				CAM_DBG(CAM_CDM,
+					"Virtual CDM HW init first time");
 				rc = 0;
 			} else {
-				CDM_CDBG("CDM HW init first time\n");
+				CAM_DBG(CAM_CDM, "CDM HW init first time");
 				rc = cam_hw_cdm_init(hw_priv, NULL, 0);
 				if (rc == 0) {
 					rc = cam_hw_cdm_alloc_genirq_mem(
 						hw_priv);
 					if (rc != 0) {
-						pr_err("Genirqalloc failed\n");
+						CAM_ERR(CAM_CDM,
+							"Genirqalloc failed");
 						cam_hw_cdm_deinit(hw_priv,
 							NULL, 0);
 					}
 				} else {
-					pr_err("CDM HW init failed\n");
+					CAM_ERR(CAM_CDM, "CDM HW init failed");
 				}
 			}
 			if (rc == 0) {
@@ -292,11 +297,11 @@
 				client->stream_on = true;
 			} else {
 				if (cam_cpas_stop(core->cpas_handle))
-					pr_err("CPAS stop failed\n");
+					CAM_ERR(CAM_CDM, "CPAS stop failed");
 			}
 		} else {
 			cdm_hw->open_count++;
-			CDM_CDBG("CDM HW already ON count=%d\n",
+			CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
 				cdm_hw->open_count);
 			rc = 0;
 			client->stream_on = true;
@@ -304,35 +309,41 @@
 	} else {
 		if (cdm_hw->open_count) {
 			cdm_hw->open_count--;
-			CDM_CDBG("stream OFF CDM %d\n", cdm_hw->open_count);
+			CAM_DBG(CAM_CDM, "stream OFF CDM %d",
+				cdm_hw->open_count);
 			if (!cdm_hw->open_count) {
-				CDM_CDBG("CDM Deinit now\n");
+				CAM_DBG(CAM_CDM, "CDM Deinit now");
 				if (core->id == CAM_CDM_VIRTUAL) {
-					CDM_CDBG("Virtual CDM HW Deinit\n");
+					CAM_DBG(CAM_CDM,
+						"Virtual CDM HW Deinit");
 					rc = 0;
 				} else {
-					CDM_CDBG("CDM HW Deinit now\n");
+					CAM_DBG(CAM_CDM, "CDM HW Deinit now");
 					rc = cam_hw_cdm_deinit(
 						hw_priv, NULL, 0);
 					if (cam_hw_cdm_release_genirq_mem(
 						hw_priv))
-						pr_err("Genirq release failed\n");
+						CAM_ERR(CAM_CDM,
+							"Genirq release fail");
 				}
 				if (rc) {
-					pr_err("Deinit failed in streamoff\n");
+					CAM_ERR(CAM_CDM,
+						"Deinit failed in streamoff");
 				} else {
 					client->stream_on = false;
 					rc = cam_cpas_stop(core->cpas_handle);
 					if (rc)
-						pr_err("CPAS stop failed\n");
+						CAM_ERR(CAM_CDM,
+							"CPAS stop failed");
 				}
 			} else {
 				client->stream_on = false;
-				CDM_CDBG("Client stream off success =%d\n",
+				CAM_DBG(CAM_CDM,
+					"Client stream off success =%d",
 					cdm_hw->open_count);
 			}
 		} else {
-			CDM_CDBG("stream OFF CDM Invalid %d\n",
+			CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d",
 				cdm_hw->open_count);
 			rc = -ENXIO;
 		}
@@ -390,33 +401,35 @@
 		struct cam_cdm_client *client;
 
 		if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
-			pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
 				arg_size);
 			break;
 		}
 		req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
 		if ((req->data->type < 0) ||
 			(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
-			pr_err("Invalid req bl cmd addr type=%d\n",
+			CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d",
 				req->data->type);
 			break;
 		}
 		idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
 		client = core->clients[idx];
 		if ((!client) || (req->handle != client->handle)) {
-			pr_err("Invalid client %pK hdl=%x\n", client,
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
 				req->handle);
 			break;
 		}
 		cam_cdm_get_client_refcount(client);
 		if ((req->data->flag == true) &&
 			(!client->data.cam_cdm_callback)) {
-			pr_err("CDM request cb without registering cb\n");
+			CAM_ERR(CAM_CDM,
+				"CDM request cb without registering cb");
 			cam_cdm_put_client_refcount(client);
 			break;
 		}
 		if (client->stream_on != true) {
-			pr_err("Invalid CDM needs to be streamed ON first\n");
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM needs to be streamed ON first");
 			cam_cdm_put_client_refcount(client);
 			break;
 		}
@@ -434,19 +447,20 @@
 		struct cam_cdm_client *client;
 
 		if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
-			pr_err("Invalid CDM cmd %d arg size=%x\n", cmd,
+			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
 				arg_size);
 			break;
 		}
 
 		mutex_lock(&cdm_hw->hw_mutex);
 		data = (struct cam_cdm_acquire_data *)cmd_args;
-		CDM_CDBG("Trying to acquire client=%s in hw idx=%d\n",
+		CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d",
 			data->identifier, core->index);
 		idx = cam_cdm_find_free_client_slot(core);
 		if ((idx < 0) || (core->clients[idx])) {
 			mutex_unlock(&cdm_hw->hw_mutex);
-			pr_err("Failed to client slots for client=%s in hw idx=%d\n",
+			CAM_ERR(CAM_CDM,
+				"Fail to client slots, client=%s in hw idx=%d",
 			data->identifier, core->index);
 			break;
 		}
@@ -477,7 +491,7 @@
 				mutex_unlock(
 					&cdm_hw->hw_mutex);
 				rc = -EPERM;
-				pr_err("Invalid ops for virtual cdm\n");
+				CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm");
 				break;
 			}
 		} else {
@@ -493,7 +507,7 @@
 					idx);
 		client->stream_on = false;
 		data->handle = client->handle;
-		CDM_CDBG("Acquired client=%s in hwidx=%d\n",
+		CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d",
 			data->identifier, core->index);
 		mutex_unlock(&client->lock);
 		rc = 0;
@@ -505,7 +519,8 @@
 		struct cam_cdm_client *client;
 
 		if (sizeof(uint32_t) != arg_size) {
-			pr_err("Invalid CDM cmd %d size=%x for handle=%x\n",
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM cmd %d size=%x for handle=%x",
 				cmd, arg_size, *handle);
 			return -EINVAL;
 		}
@@ -513,14 +528,15 @@
 		mutex_lock(&cdm_hw->hw_mutex);
 		client = core->clients[idx];
 		if ((!client) || (*handle != client->handle)) {
-			pr_err("Invalid client %pK hdl=%x\n", client, *handle);
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x",
+				client, *handle);
 			mutex_unlock(&cdm_hw->hw_mutex);
 			break;
 		}
 		cam_cdm_put_client_refcount(client);
 		mutex_lock(&client->lock);
 		if (client->refcount != 0) {
-			pr_err("CDM Client refcount not zero %d",
+			CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d",
 				client->refcount);
 			rc = -EPERM;
 			mutex_unlock(&client->lock);
@@ -536,12 +552,12 @@
 		break;
 	}
 	case CAM_CDM_HW_INTF_CMD_RESET_HW: {
-		pr_err("CDM HW reset not supported for handle =%x\n",
+		CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x",
 			*((uint32_t *)cmd_args));
 		break;
 	}
 	default:
-		pr_err("CDM HW intf command not valid =%d\n", cmd);
+		CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
 		break;
 	}
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
index eb75aaa..fa3ae04 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -32,7 +32,7 @@
 	uint32_t arg_size);
 bool cam_cdm_set_cam_hw_version(
 	uint32_t ver, struct cam_hw_version *cam_version);
-void cam_cdm_cpas_cb(int32_t client_handle, void *userdata,
+void cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
 	enum cam_camnoc_irq_type evt_type, uint32_t evt_data);
 struct cam_cdm_utils_ops *cam_cdm_get_ops(
 	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index 5fac7d8..5f6895c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-CDM-HW %s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -31,7 +29,6 @@
 #include "cam_io_util.h"
 #include "cam_hw_cdm170_reg.h"
 
-
 #define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
 #define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
 #define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
@@ -65,7 +62,7 @@
 
 	if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
 		pending_bl)) {
-		pr_err("Failed to read CDM pending BL's\n");
+		CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
 		rc = -EIO;
 	}
 
@@ -81,28 +78,28 @@
 
 	if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_MASK,
 		&irq_mask)) {
-		pr_err("Failed to read CDM IRQ mask\n");
+		CAM_ERR(CAM_CDM, "Failed to read CDM IRQ mask");
 		return rc;
 	}
 
 	if (enable == true) {
 		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
 			(irq_mask | 0x4))) {
-			pr_err("Write failed to enable BL done irq\n");
+			CAM_ERR(CAM_CDM, "Write failed to enable BL done irq");
 		} else {
 			atomic_inc(&core->bl_done);
 			rc = 0;
-			CDM_CDBG("BL done irq enabled =%d\n",
+			CAM_DBG(CAM_CDM, "BL done irq enabled =%d",
 				atomic_read(&core->bl_done));
 		}
 	} else {
 		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
 			(irq_mask & 0x70003))) {
-			pr_err("Write failed to disable BL done irq\n");
+			CAM_ERR(CAM_CDM, "Write failed to disable BL done irq");
 		} else {
 			atomic_dec(&core->bl_done);
 			rc = 0;
-			CDM_CDBG("BL done irq disable =%d\n",
+			CAM_DBG(CAM_CDM, "BL done irq disable =%d",
 				atomic_read(&core->bl_done));
 		}
 	}
@@ -115,12 +112,12 @@
 
 	if (enable == true) {
 		if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x01)) {
-			pr_err("Failed to Write CDM HW core enable\n");
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW core enable");
 			rc = -EIO;
 		}
 	} else {
 		if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x02)) {
-			pr_err("Failed to Write CDM HW core disable\n");
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW core disable");
 			rc = -EIO;
 		}
 	}
@@ -132,7 +129,7 @@
 	int rc = 0;
 
 	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
-		pr_err("Failed to Write CDM HW core debug\n");
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
 		rc = -EIO;
 	}
 
@@ -144,7 +141,7 @@
 	int rc = 0;
 
 	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
-		pr_err("Failed to Write CDM HW core debug\n");
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
 		rc = -EIO;
 	}
 
@@ -156,31 +153,31 @@
 	uint32_t dump_reg = 0;
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
-	pr_err("dump core en=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump core en=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
-	pr_err("dump scratch0=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch0=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
-	pr_err("dump scratch1=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch1=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
-	pr_err("dump scratch2=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch2=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
-	pr_err("dump scratch3=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch3=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
-	pr_err("dump scratch4=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch4=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
-	pr_err("dump scratch5=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch5=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
-	pr_err("dump scratch6=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch6=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
-	pr_err("dump scratch7=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch7=%x", dump_reg);
 
 }
 
@@ -191,64 +188,65 @@
 
 	mutex_lock(&cdm_hw->hw_mutex);
 	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
-	pr_err("CDM HW core status=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg);
 	/* First pause CDM, If it fails still proceed to dump debug info */
 	cam_hw_cdm_enable_core(cdm_hw, false);
 	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
-	pr_err("CDM HW current pending BL=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
 	loop_cnt = dump_reg;
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
-	pr_err("CDM HW Debug status reg=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW Debug status reg=%x", dump_reg);
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
 	if (core_dbg & 0x100) {
 		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
-		pr_err("AHB dump reglastaddr=%x\n", dump_reg);
+		CAM_ERR(CAM_CDM, "AHB dump reglastaddr=%x", dump_reg);
 		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
-		pr_err("AHB dump reglastdata=%x\n", dump_reg);
+		CAM_ERR(CAM_CDM, "AHB dump reglastdata=%x", dump_reg);
 	} else {
-		pr_err("CDM HW AHB dump not enable\n");
+		CAM_ERR(CAM_CDM, "CDM HW AHB dump not enable");
 	}
 
 	if (core_dbg & 0x10000) {
 		int i;
 
-		pr_err("CDM HW BL FIFO dump with loop count=%d\n", loop_cnt);
+		CAM_ERR(CAM_CDM, "CDM HW BL FIFO dump with loop count=%d",
+			loop_cnt);
 		for (i = 0 ; i < loop_cnt ; i++) {
 			cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
 			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
 				&dump_reg);
-			pr_err("BL(%d) base addr =%x\n", i, dump_reg);
+			CAM_ERR(CAM_CDM, "BL(%d) base addr =%x", i, dump_reg);
 			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
 				&dump_reg);
-			pr_err("BL(%d) len=%d tag=%d\n", i,
+			CAM_ERR(CAM_CDM, "BL(%d) len=%d tag=%d", i,
 				(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
 		}
 	} else {
-		pr_err("CDM HW BL FIFO readback not enable\n");
+		CAM_ERR(CAM_CDM, "CDM HW BL FIFO readback not enable");
 	}
 
-	pr_err("CDM HW default dump\n");
+	CAM_ERR(CAM_CDM, "CDM HW default dump");
 	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
-	pr_err("CDM HW core cfg=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW core cfg=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
-	pr_err("CDM HW irq status=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW irq status=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
-	pr_err("CDM HW irq set reg=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW irq set reg=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
-	pr_err("CDM HW current BL base=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current BL base=%x", dump_reg);
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
-	pr_err("CDM HW current BL len=%d tag=%d\n", (dump_reg & 0xFFFFF),
-		(dump_reg & 0xFF000000));
+	CAM_ERR(CAM_CDM, "CDM HW current BL len=%d tag=%d",
+		(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
 
 	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
-	pr_err("CDM HW current AHB base=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current AHB base=%x", dump_reg);
 
 	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
-	pr_err("CDM HW current pending BL=%x\n", dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
 
 	/* Enable CDM back */
 	cam_hw_cdm_enable_core(cdm_hw, true);
@@ -268,43 +266,46 @@
 	do {
 		if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
 			&pending_bl)) {
-			pr_err("Failed to read CDM pending BL's\n");
+			CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
 			rc = -EIO;
 			break;
 		}
 		available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
 		if (available_bl_slots < 0) {
-			pr_err("Invalid available slots %d:%d:%d\n",
+			CAM_ERR(CAM_CDM, "Invalid available slots %d:%d:%d",
 				available_bl_slots, CAM_CDM_HWFIFO_SIZE,
 				pending_bl);
 			break;
 		}
 		if (bl_count < (available_bl_slots - 1)) {
-			CDM_CDBG("BL slot available_cnt=%d requested=%d\n",
+			CAM_DBG(CAM_CDM,
+				"BL slot available_cnt=%d requested=%d",
 				(available_bl_slots - 1), bl_count);
 				rc = bl_count;
 				break;
 		} else if (0 == (available_bl_slots - 1)) {
 			rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, true);
 			if (rc) {
-				pr_err("Enable BL done irq failed\n");
+				CAM_ERR(CAM_CDM, "Enable BL done irq failed");
 				break;
 			}
 			time_left = wait_for_completion_timeout(
 				&core->bl_complete, msecs_to_jiffies(
 				CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
 			if (time_left <= 0) {
-				pr_err("CDM HW BL Wait timed out failed\n");
+				CAM_ERR(CAM_CDM,
+					"CDM HW BL Wait timed out failed");
 				if (cam_hw_cdm_enable_bl_done_irq(cdm_hw,
 					false))
-					pr_err("Disable BL done irq failed\n");
+					CAM_ERR(CAM_CDM,
+						"Disable BL done irq failed");
 				rc = -EIO;
 				break;
 			}
 			if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, false))
-				pr_err("Disable BL done irq failed\n");
+				CAM_ERR(CAM_CDM, "Disable BL done irq failed");
 			rc = 0;
-			CDM_CDBG("CDM HW is ready for data\n");
+			CAM_DBG(CAM_CDM, "CDM HW is ready for data");
 		} else {
 			rc = (bl_count - (available_bl_slots - 1));
 			break;
@@ -318,12 +319,12 @@
 	uint32_t len, uint32_t tag)
 {
 	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
-		pr_err("Failed to write CDM base to BL base\n");
+		CAM_ERR(CAM_CDM, "Failed to write CDM base to BL base");
 		return true;
 	}
 	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
 		((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
-		pr_err("Failed to write CDM BL len\n");
+		CAM_ERR(CAM_CDM, "Failed to write CDM BL len");
 		return true;
 	}
 	return false;
@@ -332,7 +333,7 @@
 bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
 {
 	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
-		pr_err("Failed to write CDM commit BL\n");
+		CAM_ERR(CAM_CDM, "Failed to write CDM commit BL");
 		return true;
 	}
 	return false;
@@ -347,11 +348,11 @@
 	int rc;
 
 	if (core->bl_tag > 63) {
-		pr_err("bl_tag invalid =%d\n", core->bl_tag);
+		CAM_ERR(CAM_CDM, "bl_tag invalid =%d", core->bl_tag);
 		rc = -EINVAL;
 		goto end;
 	}
-	CDM_CDBG("CDM write BL last cmd tag=%x total=%d cookie=%d\n",
+	CAM_DBG(CAM_CDM, "CDM write BL last cmd tag=%x total=%d cookie=%d",
 		core->bl_tag, req->data->cmd_arrary_count, req->data->cookie);
 	node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
 			GFP_KERNEL);
@@ -372,7 +373,7 @@
 		((4 * core->ops->cdm_required_size_genirq()) - 1),
 		core->bl_tag);
 	if (rc) {
-		pr_err("CDM hw bl write failed for gen irq bltag=%d\n",
+		CAM_ERR(CAM_CDM, "CDM hw bl write failed for gen irq bltag=%d",
 			core->bl_tag);
 		list_del_init(&node->entry);
 		kfree(node);
@@ -381,7 +382,7 @@
 	}
 
 	if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
-		pr_err("Cannot commit the genirq BL with tag tag=%d\n",
+		CAM_ERR(CAM_CDM, "Cannot commit the genirq BL with tag tag=%d",
 			core->bl_tag);
 		list_del_init(&node->entry);
 		kfree(node);
@@ -403,7 +404,7 @@
 	int write_count = 0;
 
 	if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
-		pr_info("requested BL more than max size, cnt=%d max=%d\n",
+		pr_info("requested BL more than max size, cnt=%d max=%d",
 			req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
 	}
 
@@ -414,7 +415,7 @@
 	mutex_lock(&client->lock);
 	rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
 	if (rc) {
-		pr_err("Cannot read the current BL depth\n");
+		CAM_ERR(CAM_CDM, "Cannot read the current BL depth");
 		mutex_unlock(&client->lock);
 		mutex_unlock(&cdm_hw->hw_mutex);
 		return rc;
@@ -426,14 +427,16 @@
 
 		if ((!cdm_cmd->cmd[i].len) &&
 			(cdm_cmd->cmd[i].len > 0x100000)) {
-			pr_err("cmd len(%d) is invalid cnt=%d total cnt=%d\n",
+			CAM_ERR(CAM_CDM,
+				"cmd len(%d) is invalid cnt=%d total cnt=%d",
 				cdm_cmd->cmd[i].len, i,
 				req->data->cmd_arrary_count);
 			rc = -EINVAL;
 			break;
 		}
 		if (atomic_read(&core->error)) {
-			pr_err_ratelimited("In error state cnt=%d total cnt=%d\n",
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"In error state cnt=%d total cnt=%d\n",
 				i, req->data->cmd_arrary_count);
 			rc = -EIO;
 			break;
@@ -442,7 +445,8 @@
 			write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
 				(req->data->cmd_arrary_count - i));
 			if (write_count < 0) {
-				pr_err("wait for bl fifo failed %d:%d\n",
+				CAM_ERR(CAM_CDM,
+					"wait for bl fifo failed %d:%d",
 					i, req->data->cmd_arrary_count);
 				rc = -EIO;
 				break;
@@ -458,7 +462,8 @@
 				&len);
 		} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
 			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
-				pr_err("Hw bl hw_iova is invalid %d:%d\n",
+				CAM_ERR(CAM_CDM,
+					"Hw bl hw_iova is invalid %d:%d",
 					i, req->data->cmd_arrary_count);
 				rc = -EINVAL;
 				break;
@@ -468,7 +473,8 @@
 				(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
 			len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
 		} else {
-			pr_err("Only mem hdl/hw va type is supported %d\n",
+			CAM_ERR(CAM_CDM,
+				"Only mem hdl/hw va type is supported %d",
 				req->data->type);
 			rc = -EINVAL;
 			break;
@@ -476,7 +482,7 @@
 
 		if ((!rc) && (hw_vaddr_ptr) && (len) &&
 			(len >= cdm_cmd->cmd[i].offset)) {
-			CDM_CDBG("Got the HW VA\n");
+			CAM_DBG(CAM_CDM, "Got the HW VA");
 			if (core->bl_tag >=
 				(CAM_CDM_HWFIFO_SIZE - 1))
 				core->bl_tag = 0;
@@ -485,33 +491,36 @@
 					cdm_cmd->cmd[i].offset),
 				(cdm_cmd->cmd[i].len - 1), core->bl_tag);
 			if (rc) {
-				pr_err("Hw bl write failed %d:%d\n",
+				CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d",
 					i, req->data->cmd_arrary_count);
 				rc = -EIO;
 				break;
 			}
 		} else {
-			pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for hdl=%x len=%zu:%d",
 				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
 				cdm_cmd->cmd[i].offset);
-			pr_err("Sanity check failed for %d:%d\n",
+			CAM_ERR(CAM_CDM, "Sanity check failed for %d:%d",
 				i, req->data->cmd_arrary_count);
 			rc = -EINVAL;
 			break;
 		}
 
 		if (!rc) {
-			CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+			CAM_DBG(CAM_CDM,
+				"write BL success for cnt=%d with tag=%d",
 				i, core->bl_tag);
 
-			CDM_CDBG("Now commit the BL\n");
+			CAM_DBG(CAM_CDM, "Now commit the BL");
 			if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
-				pr_err("Cannot commit the BL %d tag=%d\n",
+				CAM_ERR(CAM_CDM,
+					"Cannot commit the BL %d tag=%d",
 					i, core->bl_tag);
 				rc = -EIO;
 				break;
 			}
-			CDM_CDBG("BL commit success BL %d tag=%d\n", i,
+			CAM_DBG(CAM_CDM, "BL commit success BL %d tag=%d", i,
 				core->bl_tag);
 			core->bl_tag++;
 			if ((req->data->flag == true) &&
@@ -541,12 +550,12 @@
 		cdm_hw = payload->hw;
 		core = (struct cam_cdm *)cdm_hw->core_info;
 
-		CDM_CDBG("IRQ status=%x\n", payload->irq_status);
+		CAM_DBG(CAM_CDM, "IRQ status=%x", payload->irq_status);
 		if (payload->irq_status &
 			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
 			struct cam_cdm_bl_cb_request_entry *node;
 
-			CDM_CDBG("inline IRQ data=%x\n",
+			CAM_DBG(CAM_CDM, "inline IRQ data=%x",
 				payload->irq_data);
 			mutex_lock(&cdm_hw->hw_mutex);
 			node = cam_cdm_find_request_by_bl_tag(
@@ -560,13 +569,15 @@
 						(void *)node);
 				} else if (node->request_type ==
 						CAM_HW_CDM_BL_CB_INTERNAL) {
-					pr_err("Invalid node=%pK %d\n", node,
+					CAM_ERR(CAM_CDM,
+						"Invalid node=%pK %d", node,
 						node->request_type);
 				}
 				list_del_init(&node->entry);
 				kfree(node);
 			} else {
-				pr_err("Invalid node for inline irq status=%x data=%x\n",
+				CAM_ERR(CAM_CDM,
+					"Inval node, inline_irq st=%x data=%x",
 					payload->irq_status, payload->irq_data);
 			}
 			mutex_unlock(&cdm_hw->hw_mutex);
@@ -574,39 +585,40 @@
 
 		if (payload->irq_status &
 			CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
-			CDM_CDBG("CDM HW reset done IRQ\n");
+			CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
 			complete(&core->reset_complete);
 		}
 		if (payload->irq_status &
 			CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
 			if (atomic_read(&core->bl_done)) {
-				CDM_CDBG("CDM HW BL done IRQ\n");
+				CAM_DBG(CAM_CDM, "CDM HW BL done IRQ");
 				complete(&core->bl_complete);
 			}
 		}
 		if (payload->irq_status &
 			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
-			pr_err_ratelimited("Invalid command IRQ, Need HW reset\n");
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Invalid command IRQ, Need HW reset\n");
 			atomic_inc(&core->error);
 			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
 		}
 		if (payload->irq_status &
 			CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
-			pr_err_ratelimited("AHB Error IRQ\n");
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "AHB Error IRQ\n");
 			atomic_inc(&core->error);
 			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
 			atomic_dec(&core->error);
 		}
 		if (payload->irq_status &
 			CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
-			pr_err_ratelimited("Overflow Error IRQ\n");
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "Overflow Error IRQ\n");
 			atomic_inc(&core->error);
 			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
 			atomic_dec(&core->error);
 		}
 		kfree(payload);
 	} else {
-		pr_err("NULL payload\n");
+		CAM_ERR(CAM_CDM, "NULL payload");
 	}
 
 }
@@ -622,12 +634,13 @@
 		core = (struct cam_cdm *)cdm_hw->core_info;
 		atomic_inc(&core->error);
 		cam_hw_cdm_dump_core_debug_registers(cdm_hw);
-		pr_err_ratelimited("Page fault iova addr %pK\n", (void *)iova);
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "Page fault iova addr %pK\n",
+			(void *)iova);
 		cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
 			(void *)iova);
 		atomic_dec(&core->error);
 	} else {
-		pr_err("Invalid token\n");
+		CAM_ERR(CAM_CDM, "Invalid token");
 	}
 
 }
@@ -639,15 +652,15 @@
 	struct cam_cdm_work_payload *payload;
 	bool work_status;
 
-	CDM_CDBG("Got irq\n");
+	CAM_DBG(CAM_CDM, "Got irq");
 	payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
 	if (payload) {
 		if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
 				&payload->irq_status)) {
-			pr_err("Failed to read CDM HW IRQ status\n");
+			CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ status");
 		}
 		if (!payload->irq_status) {
-			pr_err_ratelimited("Invalid irq received\n");
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid irq received\n");
 			kfree(payload);
 			return IRQ_HANDLED;
 		}
@@ -655,21 +668,22 @@
 			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
 			if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
 				&payload->irq_data)) {
-				pr_err("Failed to read CDM HW IRQ data\n");
+				CAM_ERR(CAM_CDM,
+					"Failed to read CDM HW IRQ data");
 			}
 		}
-		CDM_CDBG("Got payload=%d\n", payload->irq_status);
+		CAM_DBG(CAM_CDM, "Got payload=%d", payload->irq_status);
 		payload->hw = cdm_hw;
 		INIT_WORK((struct work_struct *)&payload->work,
 			cam_hw_cdm_work);
 		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
 			payload->irq_status))
-			pr_err("Failed to Write CDM HW IRQ Clear\n");
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ Clear");
 		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
-			pr_err("Failed to Write CDM HW IRQ cmd\n");
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd");
 		work_status = queue_work(cdm_core->work_queue, &payload->work);
 		if (work_status == false) {
-			pr_err("Failed to queue work for irq=%x\n",
+			CAM_ERR(CAM_CDM, "Failed to queue work for irq=%x",
 				payload->irq_status);
 			kfree(payload);
 		}
@@ -697,7 +711,7 @@
 	rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
 		&genirq_alloc_out);
 	if (rc) {
-		pr_err("Failed to get genirq cmd space rc=%d\n", rc);
+		CAM_ERR(CAM_CDM, "Failed to get genirq cmd space rc=%d", rc);
 		goto end;
 	}
 	cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
@@ -723,7 +737,7 @@
 	genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
 	rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
 	if (rc)
-		pr_err("Failed to put genirq cmd space for hw\n");
+		CAM_ERR(CAM_CDM, "Failed to put genirq cmd space for hw");
 
 	return rc;
 }
@@ -746,11 +760,11 @@
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_SVS_VOTE, true);
 	if (rc) {
-		pr_err("Enable platform failed\n");
+		CAM_ERR(CAM_CDM, "Enable platform failed");
 		goto end;
 	}
 
-	CDM_CDBG("Enable soc done\n");
+	CAM_DBG(CAM_CDM, "Enable soc done");
 
 /* Before triggering the reset to HW, clear the reset complete */
 	atomic_set(&cdm_core->error, 0);
@@ -759,15 +773,15 @@
 	reinit_completion(&cdm_core->bl_complete);
 
 	if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
-		pr_err("Failed to Write CDM HW IRQ mask\n");
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask");
 		goto disable_return;
 	}
 	if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
-		pr_err("Failed to Write CDM HW reset\n");
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
 		goto disable_return;
 	}
 
-	CDM_CDBG("Waiting for CDM HW resetdone\n");
+	CAM_DBG(CAM_CDM, "Waiting for CDM HW resetdone");
 	time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
 		msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
 
@@ -778,15 +792,16 @@
 	 * as a workaround.
 	 */
 	if (time_left <= 0) {
-		pr_err("CDM HW reset Wait failed time_left=%ld\n", time_left);
+		CAM_ERR(CAM_CDM, "CDM HW reset Wait failed time_left=%ld",
+			time_left);
 		time_left = 1;
 	}
 
 	if (time_left <= 0) {
-		pr_err("CDM HW reset Wait failed rc=%d\n", rc);
+		CAM_ERR(CAM_CDM, "CDM HW reset Wait failed rc=%d", rc);
 		goto disable_return;
 	} else {
-		CDM_CDBG("CDM Init success\n");
+		CAM_DBG(CAM_CDM, "CDM Init success");
 		cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
 		cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
 		rc = 0;
@@ -815,9 +830,9 @@
 	cdm_core = cdm_hw->core_info;
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc) {
-		pr_err("disable platform failed\n");
+		CAM_ERR(CAM_CDM, "disable platform failed");
 	} else {
-		CDM_CDBG("CDM Deinit success\n");
+		CAM_DBG(CAM_CDM, "CDM Deinit success");
 		cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
 	}
 
@@ -862,7 +877,7 @@
 
 	rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
 	if (rc) {
-		pr_err("Failed to get dt properties\n");
+		CAM_ERR(CAM_CDM, "Failed to get dt properties");
 		goto release_mem;
 	}
 	cdm_hw_intf->hw_idx = cdm_hw->soc_info.index;
@@ -877,7 +892,8 @@
 	cdm_core->bl_tag = 0;
 	cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
 	if (cdm_core->id >= CAM_CDM_MAX) {
-		pr_err("Failed to get CDM HW name for %s\n", cdm_core->name);
+		CAM_ERR(CAM_CDM, "Failed to get CDM HW name for %s",
+			cdm_core->name);
 		goto release_private_mem;
 	}
 	INIT_LIST_HEAD(&cdm_core->bl_request_list);
@@ -894,14 +910,14 @@
 	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
 	mutex_lock(&cdm_hw->hw_mutex);
 
-	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+	CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
 		cdm_hw_intf->hw_idx);
 
 	platform_set_drvdata(pdev, cdm_hw_intf);
 
 	rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
 	if (rc < 0) {
-		pr_err("cpas-cdm get iommu handle failed\n");
+		CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed");
 		goto unlock_release_mem;
 	}
 	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
@@ -909,7 +925,7 @@
 
 	rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
 	if (rc < 0) {
-		pr_err("Attach iommu non secure handle failed\n");
+		CAM_ERR(CAM_CDM, "Attach iommu non secure handle failed");
 		goto destroy_non_secure_hdl;
 	}
 	cdm_core->iommu_hdl.secure = -1;
@@ -921,7 +937,7 @@
 	rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
 			cam_hw_cdm_irq, cdm_hw);
 	if (rc) {
-		pr_err("Failed to request platform resource\n");
+		CAM_ERR(CAM_CDM, "Failed to request platform resource");
 		goto destroy_non_secure_hdl;
 	}
 
@@ -932,10 +948,10 @@
 	strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
 	rc = cam_cpas_register_client(&cpas_parms);
 	if (rc) {
-		pr_err("Virtual CDM CPAS registration failed\n");
+		CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
 		goto release_platform_resource;
 	}
-	CDM_CDBG("CPAS registration successful handle=%d\n",
+	CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
 		cpas_parms.client_handle);
 	cdm_core->cpas_handle = cpas_parms.client_handle;
 
@@ -945,54 +961,54 @@
 	axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 	rc = cam_cpas_start(cdm_core->cpas_handle, &ahb_vote, &axi_vote);
 	if (rc) {
-		pr_err("CPAS start failed\n");
+		CAM_ERR(CAM_CDM, "CPAS start failed");
 		goto cpas_unregister;
 	}
 
 	rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
 	if (rc) {
-		pr_err("Failed to Init CDM HW\n");
+		CAM_ERR(CAM_CDM, "Failed to Init CDM HW");
 		goto cpas_stop;
 	}
 	cdm_hw->open_count++;
 
 	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
 		&cdm_core->hw_version)) {
-		pr_err("Failed to read CDM HW Version\n");
+		CAM_ERR(CAM_CDM, "Failed to read CDM HW Version");
 		goto deinit;
 	}
 
 	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_TITAN_VERSION,
 		&cdm_core->hw_family_version)) {
-		pr_err("Failed to read CDM family Version\n");
+		CAM_ERR(CAM_CDM, "Failed to read CDM family Version");
 		goto deinit;
 	}
 
-	CDM_CDBG("CDM Hw version read success family =%x hw =%x\n",
+	CAM_DBG(CAM_CDM, "CDM Hw version read success family =%x hw =%x",
 		cdm_core->hw_family_version, cdm_core->hw_version);
 	cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
 		false);
 	if (!cdm_core->ops) {
-		pr_err("Failed to util ops for hw\n");
+		CAM_ERR(CAM_CDM, "Failed to util ops for hw");
 		goto deinit;
 	}
 
 	if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
 		&cdm_core->version)) {
-		pr_err("Failed to set cam he version for hw\n");
+		CAM_ERR(CAM_CDM, "Failed to set cam he version for hw");
 		goto deinit;
 	}
 
 	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
 	if (rc) {
-		pr_err("Failed to Deinit CDM HW\n");
+		CAM_ERR(CAM_CDM, "Failed to Deinit CDM HW");
 		cdm_hw->open_count--;
 		goto cpas_stop;
 	}
 
 	rc = cam_cpas_stop(cdm_core->cpas_handle);
 	if (rc) {
-		pr_err("CPAS stop failed\n");
+		CAM_ERR(CAM_CDM, "CPAS stop failed");
 		cdm_hw->open_count--;
 		goto cpas_unregister;
 	}
@@ -1000,30 +1016,30 @@
 	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
 		soc_private, CAM_HW_CDM, &cdm_core->index);
 	if (rc) {
-		pr_err("HW CDM Interface registration failed\n");
+		CAM_ERR(CAM_CDM, "HW CDM Interface registration failed");
 		cdm_hw->open_count--;
 		goto cpas_unregister;
 	}
 	cdm_hw->open_count--;
 	mutex_unlock(&cdm_hw->hw_mutex);
 
-	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+	CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
 
 	return rc;
 
 deinit:
 	if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
-		pr_err("Deinit failed for hw\n");
+		CAM_ERR(CAM_CDM, "Deinit failed for hw");
 	cdm_hw->open_count--;
 cpas_stop:
 	if (cam_cpas_stop(cdm_core->cpas_handle))
-		pr_err("CPAS stop failed\n");
+		CAM_ERR(CAM_CDM, "CPAS stop failed");
 cpas_unregister:
 	if (cam_cpas_unregister_client(cdm_core->cpas_handle))
-		pr_err("CPAS unregister failed\n");
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
 release_platform_resource:
 	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
-		pr_err("Release platform resource failed\n");
+		CAM_ERR(CAM_CDM, "Release platform resource failed");
 
 	flush_workqueue(cdm_core->work_queue);
 	destroy_workqueue(cdm_core->work_queue);
@@ -1031,7 +1047,7 @@
 	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
 		NULL, cdm_hw);
 	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
-		pr_err("Release iommu secure hdl failed\n");
+		CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
 unlock_release_mem:
 	mutex_unlock(&cdm_hw->hw_mutex);
 release_private_mem:
@@ -1053,26 +1069,28 @@
 
 	cdm_hw_intf = platform_get_drvdata(pdev);
 	if (!cdm_hw_intf) {
-		pr_err("Failed to get dev private data\n");
+		CAM_ERR(CAM_CDM, "Failed to get dev private data");
 		return rc;
 	}
 
 	cdm_hw = cdm_hw_intf->hw_priv;
 	if (!cdm_hw) {
-		pr_err("Failed to get hw private data for type=%d idx=%d\n",
+		CAM_ERR(CAM_CDM,
+			"Failed to get hw private data for type=%d idx=%d",
 			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
 		return rc;
 	}
 
 	cdm_core = cdm_hw->core_info;
 	if (!cdm_core) {
-		pr_err("Failed to get hw core data for type=%d idx=%d\n",
+		CAM_ERR(CAM_CDM,
+			"Failed to get hw core data for type=%d idx=%d",
 			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
 		return rc;
 	}
 
 	if (cdm_hw->open_count != 0) {
-		pr_err("Hw open count invalid type=%d idx=%d cnt=%d\n",
+		CAM_ERR(CAM_CDM, "Hw open count invalid type=%d idx=%d cnt=%d",
 			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
 			cdm_hw->open_count);
 		return rc;
@@ -1080,24 +1098,24 @@
 
 	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
 	if (rc) {
-		pr_err("Deinit failed for hw\n");
+		CAM_ERR(CAM_CDM, "Deinit failed for hw");
 		return rc;
 	}
 
 	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
 	if (rc) {
-		pr_err("CPAS unregister failed\n");
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
 		return rc;
 	}
 
 	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
-		pr_err("Release platform resource failed\n");
+		CAM_ERR(CAM_CDM, "Release platform resource failed");
 
 	flush_workqueue(cdm_core->work_queue);
 	destroy_workqueue(cdm_core->work_queue);
 
 	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
-		pr_err("Release iommu secure hdl failed\n");
+		CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
 	cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
 		NULL, cdm_hw);
 
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
index 59f5b92..fa98be2 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-CDM-INTF %s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -41,10 +39,10 @@
 
 	mutex_lock(&cam_cdm_mgr_lock);
 	if (cdm_mgr.probe_done == false) {
-		pr_err("CDM intf mgr not probed yet\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
 		rc = -EPERM;
 	} else {
-		CDM_CDBG("CDM intf mgr get refcount=%d\n",
+		CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d",
 			cdm_mgr.refcount);
 		cdm_mgr.refcount++;
 	}
@@ -56,14 +54,14 @@
 {
 	mutex_lock(&cam_cdm_mgr_lock);
 	if (cdm_mgr.probe_done == false) {
-		pr_err("CDM intf mgr not probed yet\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
 	} else {
-		CDM_CDBG("CDM intf mgr put refcount=%d\n",
+		CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d",
 			cdm_mgr.refcount);
 		if (cdm_mgr.refcount > 0) {
 			cdm_mgr.refcount--;
 		} else {
-			pr_err("Refcount put when zero\n");
+			CAM_ERR(CAM_CDM, "Refcount put when zero");
 			WARN_ON(1);
 		}
 	}
@@ -90,20 +88,20 @@
 	int rc = -EPERM, i, j;
 	char client_name[128];
 
-	CDM_CDBG("Looking for HW id of =%s and index=%d\n",
+	CAM_DBG(CAM_CDM, "Looking for HW id of =%s and index=%d",
 		identifier, cell_index);
 	snprintf(client_name, sizeof(client_name), "%s", identifier);
-	CDM_CDBG("Looking for HW id of %s count:%d\n", client_name,
+	CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name,
 		cdm_mgr.cdm_count);
 	mutex_lock(&cam_cdm_mgr_lock);
 	for (i = 0; i < cdm_mgr.cdm_count; i++) {
 		mutex_lock(&cdm_mgr.nodes[i].lock);
-		CDM_CDBG("dt_num_supported_clients=%d\n",
+		CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d",
 			cdm_mgr.nodes[i].data->dt_num_supported_clients);
 
 		for (j = 0; j <
 			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
-			CDM_CDBG("client name:%s\n",
+			CAM_DBG(CAM_CDM, "client name:%s",
 				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
 			if (!strcmp(
 				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
@@ -131,10 +129,10 @@
 		return -EINVAL;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		return rc;
 	}
-	CDM_CDBG("Looking for Iommu handle of %s\n", identifier);
+	CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier);
 
 	for (i = 0; i < cdm_mgr.cdm_count; i++) {
 		mutex_lock(&cdm_mgr.nodes[i].lock);
@@ -173,39 +171,41 @@
 		return -EINVAL;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		return rc;
 	}
 
 	if (data->id > CAM_CDM_HW_ANY) {
-		pr_err("only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported\n");
+		CAM_ERR(CAM_CDM,
+			"only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported");
 		rc = -EPERM;
 		goto end;
 	}
 	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
 		&hw_index);
 	if ((rc < 0) && (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
-		pr_err("Failed to identify associated hw id\n");
+		CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
 		goto end;
 	} else {
-		CDM_CDBG("hw_index:%d\n", hw_index);
+		CAM_DBG(CAM_CDM, "hw_index:%d", hw_index);
 		hw = cdm_mgr.nodes[hw_index].device;
 		if (hw && hw->hw_ops.process_cmd) {
 			rc = hw->hw_ops.process_cmd(hw->hw_priv,
 					CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
 					sizeof(struct cam_cdm_acquire_data));
 			if (rc < 0) {
-				pr_err("CDM hw acquire failed\n");
+				CAM_ERR(CAM_CDM, "CDM hw acquire failed");
 				goto end;
 			}
 		} else {
-			pr_err("idx %d doesn't have acquire ops\n", hw_index);
+			CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops",
+				hw_index);
 			rc = -EPERM;
 		}
 	}
 end:
 	if (rc < 0) {
-		pr_err("CDM acquire failed for id=%d name=%s, idx=%d\n",
+		CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d",
 			data->id, data->identifier, data->cell_index);
 		put_cdm_mgr_refcount();
 	}
@@ -220,7 +220,7 @@
 	struct cam_hw_intf *hw;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		return rc;
 	}
 
@@ -232,10 +232,11 @@
 					CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
 					sizeof(handle));
 			if (rc < 0)
-				pr_err("hw release failed for handle=%x\n",
+				CAM_ERR(CAM_CDM,
+					"hw release failed for handle=%x",
 					handle);
 		} else
-			pr_err("hw idx %d doesn't have release ops\n",
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
 				hw_index);
 	}
 	put_cdm_mgr_refcount();
@@ -257,7 +258,7 @@
 		return rc;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		rc = -EPERM;
 		return rc;
 	}
@@ -274,10 +275,11 @@
 				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
 				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
 			if (rc < 0)
-				pr_err("hw submit bl failed for handle=%x\n",
+				CAM_ERR(CAM_CDM,
+					"hw submit bl failed for handle=%x",
 					handle);
 		} else {
-			pr_err("hw idx %d doesn't have submit ops\n",
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops",
 				hw_index);
 		}
 	}
@@ -294,7 +296,7 @@
 	struct cam_hw_intf *hw;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		rc = -EPERM;
 		return rc;
 	}
@@ -306,10 +308,12 @@
 				rc = hw->hw_ops.start(hw->hw_priv, &handle,
 						sizeof(uint32_t));
 				if (rc < 0)
-					pr_err("hw start failed handle=%x\n",
+					CAM_ERR(CAM_CDM,
+						"hw start failed handle=%x",
 						handle);
 			} else {
-				pr_err("hw idx %d doesn't have start ops\n",
+				CAM_ERR(CAM_CDM,
+					"hw idx %d doesn't have start ops",
 					hw_index);
 			}
 	}
@@ -326,7 +330,7 @@
 	struct cam_hw_intf *hw;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		rc = -EPERM;
 		return rc;
 	}
@@ -338,10 +342,10 @@
 			rc = hw->hw_ops.stop(hw->hw_priv, &handle,
 					sizeof(uint32_t));
 			if (rc < 0)
-				pr_err("hw stop failed handle=%x\n",
+				CAM_ERR(CAM_CDM, "hw stop failed handle=%x",
 					handle);
 		} else {
-			pr_err("hw idx %d doesn't have stop ops\n",
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops",
 				hw_index);
 		}
 	}
@@ -358,7 +362,7 @@
 	struct cam_hw_intf *hw;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		rc = -EPERM;
 		return rc;
 	}
@@ -371,10 +375,11 @@
 					CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
 					sizeof(handle));
 			if (rc < 0)
-				pr_err("CDM hw release failed for handle=%x\n",
+				CAM_ERR(CAM_CDM,
+					"CDM hw release failed for handle=%x",
 					handle);
 		} else {
-			pr_err("hw idx %d doesn't have release ops\n",
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
 				hw_index);
 		}
 	}
@@ -394,7 +399,7 @@
 		return rc;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		return rc;
 	}
 
@@ -417,7 +422,7 @@
 		cdm_mgr.cdm_count++;
 		rc = 0;
 	} else {
-		pr_err("CDM registration failed type=%d count=%d\n",
+		CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d",
 			type, cdm_mgr.cdm_count);
 	}
 	mutex_unlock(&cam_cdm_mgr_lock);
@@ -436,7 +441,7 @@
 		return rc;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		rc = -EPERM;
 		return rc;
 	}
@@ -459,7 +464,7 @@
 		cdm_mgr.cdm_count--;
 		rc = 0;
 	} else {
-		pr_err("CDM Deregistration failed type=%d index=%d\n",
+		CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d",
 			type, index);
 	}
 	mutex_unlock(&cam_cdm_mgr_lock);
@@ -474,7 +479,7 @@
 
 	rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
 	if (rc) {
-		pr_err("Failed to get dt properties\n");
+		CAM_ERR(CAM_CDM, "Failed to get dt properties");
 		return rc;
 	}
 	mutex_lock(&cam_cdm_mgr_lock);
@@ -494,7 +499,8 @@
 		for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
 			if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
 				(cdm_mgr.nodes[i].refcount != 0))
-				pr_err("Valid node present in index=%d\n", i);
+				CAM_ERR(CAM_CDM,
+					"Valid node present in index=%d", i);
 			mutex_destroy(&cdm_mgr.nodes[i].lock);
 			cdm_mgr.nodes[i].device = NULL;
 			cdm_mgr.nodes[i].data = NULL;
@@ -511,19 +517,19 @@
 	int i, rc = -EBUSY;
 
 	if (get_cdm_mgr_refcount()) {
-		pr_err("CDM intf mgr get refcount failed\n");
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
 		return rc;
 	}
 
 	if (cam_virtual_cdm_remove(pdev)) {
-		pr_err("Virtual CDM remove failed\n");
+		CAM_ERR(CAM_CDM, "Virtual CDM remove failed");
 		goto end;
 	}
 	put_cdm_mgr_refcount();
 
 	mutex_lock(&cam_cdm_mgr_lock);
 	if (cdm_mgr.refcount != 0) {
-		pr_err("cdm manger refcount not zero %d\n",
+		CAM_ERR(CAM_CDM, "cdm manger refcount not zero %d",
 			cdm_mgr.refcount);
 		goto end;
 	}
@@ -531,7 +537,7 @@
 	for (i = 0 ; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
 		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
 			(cdm_mgr.nodes[i].refcount != 0)) {
-			pr_err("Valid node present in index=%d\n", i);
+			CAM_ERR(CAM_CDM, "Valid node present in index=%d", i);
 			mutex_unlock(&cam_cdm_mgr_lock);
 			goto end;
 		}
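
Every interface entry point converted above wraps its work in the same manager-reference guard: take a reference (failing with -EPERM until the manager has probed), perform the per-HW operation, then drop the reference. A minimal sketch of that pattern, with the ops lookup elided and a hypothetical wrapper name:

/* Minimal sketch of the guard pattern used by the cam_cdm_intf entry
 * points above; hypothetical wrapper, hw_ops lookup elided.
 */
static int example_cdm_intf_op(uint32_t handle)
{
	int rc = -EPERM;

	if (get_cdm_mgr_refcount()) {
		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
		return rc;
	}

	/* ... resolve the hw node from the handle and call hw_ops ... */
	rc = 0;

	put_cdm_mgr_refcount();
	return rc;
}
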
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
index fbf185c..f8b0d3d 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-CDM-SOC %s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -40,27 +38,29 @@
 	resource_size_t mem_len =
 		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
 
-	CDM_CDBG("E: b=%pK blen=%d reg=%x off=%x\n", (void *)base,
+	CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base,
 		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
 		reg)));
-	CDM_CDBG("E: b=%pK reg=%x off=%x\n", (void *)base,
+	CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x", (void __iomem *)base,
 		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)));
 
 	if ((reg > cdm->offset_tbl->offset_max_size) ||
 		(reg > cdm->offset_tbl->last_offset)) {
-		pr_err_ratelimited("Invalid reg=%d\n", reg);
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg);
 		goto permission_error;
 	} else {
 		reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
 				cdm->offset_tbl, reg)));
 		if (reg_addr > (base + mem_len)) {
-			pr_err_ratelimited("Invalid mapped region %d\n", reg);
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Invalid mapped region %d", reg);
 			goto permission_error;
 		}
 		*value = cam_io_r_mb(reg_addr);
-		CDM_CDBG("X b=%pK reg=%x off=%x val=%x\n",
-			(void *)base, reg, (CAM_CDM_OFFSET_FROM_REG(
-				cdm->offset_tbl, reg)),	*value);
+		CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x",
+			(void __iomem *)base, reg,
+			(CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)),
+			*value);
 		return false;
 	}
 permission_error:
@@ -79,18 +79,20 @@
 	resource_size_t mem_len =
 		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
 
-	CDM_CDBG("E: b=%pK reg=%x off=%x val=%x\n", (void *)base,
+	CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base,
 		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
 
 	if ((reg > cdm->offset_tbl->offset_max_size) ||
 		(reg > cdm->offset_tbl->last_offset)) {
-		pr_err_ratelimited("CDM accessing invalid reg=%d\n", reg);
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n",
+			reg);
 		goto permission_error;
 	} else {
 		reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
 				cdm->offset_tbl, reg));
 		if (reg_addr > (base + mem_len)) {
-			pr_err_ratelimited("Accessing invalid region %d:%d\n",
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Accessing invalid region %d:%d\n",
 				reg, (CAM_CDM_OFFSET_FROM_REG(
 				cdm->offset_tbl, reg)));
 			goto permission_error;
@@ -111,17 +113,17 @@
 	ptr->dt_num_supported_clients = of_property_count_strings(
 						pdev->dev.of_node,
 						"cdm-client-names");
-	CDM_CDBG("Num supported cdm_client = %d\n",
+	CAM_DBG(CAM_CDM, "Num supported cdm_client = %d",
 		ptr->dt_num_supported_clients);
 	if (ptr->dt_num_supported_clients >
 		CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
-		pr_err("Invalid count of client names count=%d\n",
+		CAM_ERR(CAM_CDM, "Invalid count of client names count=%d",
 			ptr->dt_num_supported_clients);
 		rc = -EINVAL;
 		return rc;
 	}
 	if (ptr->dt_num_supported_clients < 0) {
-		CDM_CDBG("No cdm client names found\n");
+		CAM_DBG(CAM_CDM, "No cdm client names found");
 		ptr->dt_num_supported_clients = 0;
 		ptr->dt_cdm_shared = false;
 	} else {
@@ -130,10 +132,10 @@
 	for (i = 0; i < ptr->dt_num_supported_clients; i++) {
 		rc = of_property_read_string_index(pdev->dev.of_node,
 			"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
-		CDM_CDBG("cdm-client-names[%d] = %s\n",	i,
+		CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s", i,
 			ptr->dt_cdm_client_name[i]);
 		if (rc < 0) {
-			pr_err("Reading cdm-client-names failed\n");
+			CAM_ERR(CAM_CDM, "Reading cdm-client-names failed");
 			break;
 		}
 	}
@@ -156,7 +158,7 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_ptr);
 	if (rc != 0) {
-		pr_err("Failed to retrieve the CDM dt properties\n");
+		CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties");
 	} else {
 		soc_ptr->soc_private = kzalloc(
 				sizeof(struct cam_cdm_private_dt_data),
@@ -167,15 +169,15 @@
 		rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
 			soc_ptr->soc_private);
 		if (rc != 0) {
-			pr_err("Failed to load CDM dt private data\n");
+			CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
 			goto error;
 		}
 		id = of_match_node(table, soc_ptr->pdev->dev.of_node);
 		if ((!id) || !(id->data)) {
-			pr_err("Failed to retrieve the CDM id table\n");
+			CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table");
 			goto error;
 		}
-		CDM_CDBG("CDM Hw Id compatible =%s\n", id->compatible);
+		CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible);
 		((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
 			(struct cam_cdm_reg_offset_table *)id->data;
 		strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
@@ -199,7 +201,8 @@
 
 	rc = of_property_read_u32(pdev->dev.of_node,
 		"num-hw-cdm", &mgr->dt_supported_hw_cdm);
-	CDM_CDBG("Number of HW cdm supported =%d\n", mgr->dt_supported_hw_cdm);
+	CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d",
+		mgr->dt_supported_hw_cdm);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
index a3de3d1..c8b830f 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-CDM-UTIL %s:%d " fmt, __func__, __LINE__
-
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -245,7 +243,7 @@
 		*dst++ = *src++;
 	}
 
-	return pCmdBuffer;
+	return dst;
 }
 
 uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
@@ -327,7 +325,7 @@
 
 	for (i = 0; i < base_array_size; i++) {
 		if (base_table[i])
-			CDM_CDBG("In loop %d ioremap for %x addr=%x\n",
+			CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x",
 			i, (base_table[i])->mem_cam_base, hw_base);
 		if ((base_table[i]) &&
 			((base_table[i])->mem_cam_base == hw_base)) {
@@ -349,7 +347,7 @@
 
 	if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
 		(!base_addr)) {
-		pr_err(" invalid base addr and data length  %d %pK\n",
+		CAM_ERR(CAM_CDM, "invalid base addr and data length %d %pK",
 			cmd_buf_size, base_addr);
 		return -EINVAL;
 	}
@@ -359,7 +357,7 @@
 		(((reg_cont->count * sizeof(uint32_t)) +
 			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
 			cmd_buf_size)) {
-		pr_err(" buffer size %d is not sufficient for count%d\n",
+		CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count %d",
 			cmd_buf_size, reg_cont->count);
 		return -EINVAL;
 	}
@@ -381,7 +379,7 @@
 	uint32_t *data;
 
 	if (!base_addr) {
-		pr_err("invalid base address\n");
+		CAM_ERR(CAM_CDM, "invalid base address");
 		return -EINVAL;
 	}
 
@@ -390,15 +388,16 @@
 		(((reg_random->count * (sizeof(uint32_t) * 2)) +
 		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
 			cmd_buf_size)) {
-		pr_err("invalid reg_count  %d cmd_buf_size %d\n",
+		CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
 			reg_random->count, cmd_buf_size);
 		return -EINVAL;
 	}
 	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
 
 	for (i = 0; i < reg_random->count; i++) {
-		CDM_DUMP_CDBG("reg random: offset 0x%llx, value 0x%x\n",
-			((uint64_t) base_addr + data[0]), data[1]);
+		CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
+			((void __iomem *)(base_addr + data[0])),
+			data[1]);
 		cam_io_w(data[1], base_addr + data[0]);
 		data += 2;
 	}
@@ -420,7 +419,8 @@
 	swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
 
 	if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
-		pr_err("invalid CDM_SWD_DMI length %d\n", swd_dmi->length + 1);
+		CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
+			swd_dmi->length + 1);
 		return -EINVAL;
 	}
 	data = cmd_buf + cdm_required_size_dmi();
@@ -457,7 +457,7 @@
 	total_cmd_buf_size = cmd_buf_size;
 
 	while (cmd_buf_size > 0) {
-		CDM_CDBG("cmd data=%x\n", *cmd_buf);
+		CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf);
 		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
 		switch (cdm_cmd_type) {
 		case CAM_CDM_CMD_REG_CONT: {
@@ -488,7 +488,8 @@
 		case CAM_CDM_CMD_SWD_DMI_32:
 		case CAM_CDM_CMD_SWD_DMI_64: {
 			if (*current_device_base == 0) {
-				pr_err("Got SWI DMI cmd =%d for invalid hw\n",
+				CAM_ERR(CAM_CDM,
+					"Got SWD DMI cmd=%d for invalid hw",
 					cdm_cmd_type);
 				ret = -EINVAL;
 				break;
@@ -513,11 +514,12 @@
 				change_base_cmd->base, base_array_size,
 				base_table, current_device_base);
 			if (ret != 0) {
-				pr_err("Get ioremap change base failed %x\n",
+				CAM_ERR(CAM_CDM,
+					"Get ioremap change base failed %x",
 					change_base_cmd->base);
 				break;
 			}
-			CDM_CDBG("Got ioremap for %x addr=%pK\n",
+			CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK",
 				change_base_cmd->base,
 				current_device_base);
 			cmd_buf_size -= (4 *
@@ -526,7 +528,7 @@
 			}
 			break;
 		default:
-			pr_err(" unsupported cdm_cmd_type type 0%x\n",
+			CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type 0x%x",
 			cdm_cmd_type);
 			ret = -EINVAL;
 			break;
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
index bfe14e1..b230d4e 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-CDM-VIRTUAL %s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -46,7 +44,7 @@
 		if (payload->irq_status & 0x2) {
 			struct cam_cdm_bl_cb_request_entry *node;
 
-			CDM_CDBG("CDM HW Gen/inline IRQ with data=%x\n",
+			CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x",
 				payload->irq_data);
 			mutex_lock(&cdm_hw->hw_mutex);
 			node = cam_cdm_find_request_by_bl_tag(
@@ -60,18 +58,18 @@
 						(void *)node);
 				} else if (node->request_type ==
 					CAM_HW_CDM_BL_CB_INTERNAL) {
-					pr_err("Invalid node=%pK %d\n", node,
-						node->request_type);
+					CAM_ERR(CAM_CDM, "Invalid node=%pK %d",
+						node, node->request_type);
 				}
 				list_del_init(&node->entry);
 				kfree(node);
 			} else {
-				pr_err("Invalid node for inline irq\n");
+				CAM_ERR(CAM_CDM, "Invalid node for inline irq");
 			}
 			mutex_unlock(&cdm_hw->hw_mutex);
 		}
 		if (payload->irq_status & 0x1) {
-			CDM_CDBG("CDM HW reset done IRQ\n");
+			CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
 			complete(&core->reset_complete);
 		}
 		kfree(payload);
@@ -94,7 +92,8 @@
 
 		if ((!cdm_cmd->cmd[i].len) &&
 			(cdm_cmd->cmd[i].len > 0x100000)) {
-			pr_err("len(%d) is invalid count=%d total cnt=%d\n",
+			CAM_ERR(CAM_CDM,
+				"len(%d) is invalid count=%d total cnt=%d",
 				cdm_cmd->cmd[i].len, i,
 				req->data->cmd_arrary_count);
 			rc = -EINVAL;
@@ -111,7 +110,8 @@
 				(uint64_t)cdm_cmd->cmd[i].bl_addr.kernel_iova;
 			len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
 		} else {
-			pr_err("Only mem hdl/Kernel va type is supported %d\n",
+			CAM_ERR(CAM_CDM,
+				"Only mem hdl/Kernel va type is supported %d",
 				req->data->type);
 			rc = -EINVAL;
 			break;
@@ -119,7 +119,8 @@
 
 		if ((!rc) && (vaddr_ptr) && (len) &&
 			(len >= cdm_cmd->cmd[i].offset)) {
-			CDM_CDBG("hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu\n",
+			CAM_DBG(CAM_CDM,
+				"hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
 				cdm_cmd->cmd[i].bl_addr.mem_handle,
 				(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
 				cdm_cmd->cmd[i].len, len);
@@ -130,15 +131,17 @@
 				cdm_cmd->cmd[i].len, client->data.base_array,
 				client->data.base_array_cnt, core->bl_tag);
 			if (rc) {
-				pr_err("write failed for cnt=%d:%d\n",
+				CAM_ERR(CAM_CDM, "write failed for cnt=%d:%d",
 					i, req->data->cmd_arrary_count);
 				break;
 			}
 		} else {
-			pr_err("Sanity check failed for hdl=%x len=%zu:%d\n",
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for hdl=%x len=%zu:%d",
 				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
 				cdm_cmd->cmd[i].offset);
-			pr_err("Sanity check failed for cmd_count=%d cnt=%d\n",
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for cmd_count=%d cnt=%d",
 				i, req->data->cmd_arrary_count);
 			rc = -EINVAL;
 			break;
@@ -146,7 +149,8 @@
 		if (!rc) {
 			struct cam_cdm_work_payload *payload;
 
-			CDM_CDBG("write BL success for cnt=%d with tag=%d\n",
+			CAM_DBG(CAM_CDM,
+				"write BL success for cnt=%d with tag=%d",
 				i, core->bl_tag);
 			if ((true == req->data->flag) &&
 				(i == req->data->cmd_arrary_count)) {
@@ -184,7 +188,8 @@
 					}
 			}
 			core->bl_tag++;
-			CDM_CDBG("Now commit the BL nothing for virtual\n");
+			CAM_DBG(CAM_CDM,
+				"Now commit the BL nothing for virtual");
 			if (!rc && (core->bl_tag == 63))
 				core->bl_tag = 0;
 		}
@@ -230,7 +235,7 @@
 
 	rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
 	if (rc) {
-		pr_err("Failed to load CDM dt private data\n");
+		CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
 		kfree(cdm_hw->soc_info.soc_private);
 		cdm_hw->soc_info.soc_private = NULL;
 		goto soc_load_failed;
@@ -257,7 +262,7 @@
 	cdm_hw_intf->hw_ops.write = NULL;
 	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
 
-	CDM_CDBG("type %d index %d\n", cdm_hw_intf->hw_type,
+	CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
 		cdm_hw_intf->hw_idx);
 
 	platform_set_drvdata(pdev, cdm_hw_intf);
@@ -285,22 +290,23 @@
 		CAM_HW_IDENTIFIER_LENGTH);
 	rc = cam_cpas_register_client(&cpas_parms);
 	if (rc) {
-		pr_err("Virtual CDM CPAS registration failed\n");
+		CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
 		goto cpas_registration_failed;
 	}
-	CDM_CDBG("CPAS registration successful handle=%d\n",
+	CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
 		cpas_parms.client_handle);
 	cdm_core->cpas_handle = cpas_parms.client_handle;
 
-	CDM_CDBG("CDM%d probe successful\n", cdm_hw_intf->hw_idx);
+	CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
 
 	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
 			soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
 	if (rc) {
-		pr_err("Virtual CDM Interface registration failed\n");
+		CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed");
 		goto intf_registration_failed;
 	}
-	CDM_CDBG("CDM%d registered to intf successful\n", cdm_hw_intf->hw_idx);
+	CAM_DBG(CAM_CDM, "CDM%d registered to intf successful",
+		cdm_hw_intf->hw_idx);
 	mutex_unlock(&cdm_hw->hw_mutex);
 
 	return 0;
@@ -328,27 +334,29 @@
 
 	cdm_hw_intf = platform_get_drvdata(pdev);
 	if (!cdm_hw_intf) {
-		pr_err("Failed to get dev private data\n");
+		CAM_ERR(CAM_CDM, "Failed to get dev private data");
 		return rc;
 	}
 
 	cdm_hw = cdm_hw_intf->hw_priv;
 	if (!cdm_hw) {
-		pr_err("Failed to get virtual private data for type=%d idx=%d\n",
+		CAM_ERR(CAM_CDM,
+			"Failed to get virtual private data for type=%d idx=%d",
 			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
 		return rc;
 	}
 
 	cdm_core = cdm_hw->core_info;
 	if (!cdm_core) {
-		pr_err("Failed to get virtual core data for type=%d idx=%d\n",
+		CAM_ERR(CAM_CDM,
+			"Failed to get virtual core data for type=%d idx=%d",
 			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
 		return rc;
 	}
 
 	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
 	if (rc) {
-		pr_err("CPAS unregister failed\n");
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
 		return rc;
 	}
 
@@ -356,7 +364,8 @@
 			cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
 			cdm_core->index);
 	if (rc) {
-		pr_err("Virtual CDM Interface de-registration failed\n");
+		CAM_ERR(CAM_CDM,
+			"Virtual CDM Interface de-registration failed");
 		return rc;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index fac8900..8f625ae 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include "cam_context.h"
+#include "cam_debug_util.h"
 
 static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
 	void *evt_data)
@@ -21,7 +22,7 @@
 	struct cam_context *ctx = (struct cam_context *)context;
 
 	if (!ctx || !ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
@@ -29,8 +30,9 @@
 		rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id,
 			evt_data);
 	else
-		pr_debug("%s: No function to handle event %d in dev %d, state %d\n",
-				__func__, evt_id, ctx->dev_hdl, ctx->state);
+		CAM_DBG(CAM_CORE,
+			"No function to handle event %d in dev %d, state %d",
+			evt_id, ctx->dev_hdl, ctx->state);
 	return rc;
 }
 
@@ -40,12 +42,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n'", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!info) {
-		pr_err("%s: Invalid get device info payload.\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid get device info payload");
 		return -EINVAL;
 	}
 
@@ -54,8 +56,8 @@
 		rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
 			ctx, info);
 	} else {
-		pr_err("%s: No get device info in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -69,12 +71,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!link) {
-		pr_err("%s: Invalid link payload.\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid link payload");
 		return -EINVAL;
 	}
 
@@ -82,7 +84,7 @@
 	if (ctx->state_machine[ctx->state].crm_ops.link) {
 		rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
 	} else {
-		pr_err("%s: No crm link in dev %d, state %d\n", __func__,
+		CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d",
 			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
@@ -97,12 +99,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready!\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!unlink) {
-		pr_err("%s: Invalid unlink payload.\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid unlink payload");
 		return -EINVAL;
 	}
 
@@ -111,8 +113,8 @@
 		rc = ctx->state_machine[ctx->state].crm_ops.unlink(
 			ctx, unlink);
 	} else {
-		pr_err("%s: No crm unlink in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No crm unlink in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -126,12 +128,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n'", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!apply) {
-		pr_err("%s: Invalid apply request payload.\n'", __func__);
+		CAM_ERR(CAM_CORE, "Invalid apply request payload");
 		return -EINVAL;
 	}
 
@@ -140,8 +142,8 @@
 		rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
 			apply);
 	} else {
-		pr_err("%s: No crm apply req in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -155,7 +157,7 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
@@ -164,8 +166,8 @@
 		rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
 			flush);
 	} else {
-		pr_err("%s: No crm flush req in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No crm flush req in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -179,13 +181,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid acquire device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid acquire device command payload");
 		return -EINVAL;
 	}
 
@@ -194,8 +195,8 @@
 		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
 			ctx, cmd);
 	} else {
-		pr_err("%s: No acquire device in dev %d, state %d\n",
-			__func__, cmd->dev_handle, ctx->state);
+		CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d",
+			cmd->dev_handle, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -209,13 +210,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid release device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid release device command payload");
 		return -EINVAL;
 	}
 
@@ -224,8 +224,8 @@
 		rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
 			ctx, cmd);
 	} else {
-		pr_err("%s: No release device in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No release device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -239,13 +239,12 @@
 	int rc;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: context is not ready\n'", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid config device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid config device command payload");
 		return -EINVAL;
 	}
 
@@ -254,8 +253,8 @@
 		rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
 			ctx, cmd);
 	} else {
-		pr_err("%s: No config device in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_ERR(CAM_CORE, "No config device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 		rc = -EPROTO;
 	}
 	mutex_unlock(&ctx->ctx_mutex);
@@ -269,13 +268,12 @@
 	int rc = 0;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid start device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid start device command payload");
 		return -EINVAL;
 	}
 
@@ -285,8 +283,8 @@
 			ctx, cmd);
 	else
 		/* start device can be optional for some driver */
-		pr_debug("%s: No start device in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_DBG(CAM_CORE, "No start device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 
 	mutex_unlock(&ctx->ctx_mutex);
 
@@ -299,13 +297,12 @@
 	int rc = 0;
 
 	if (!ctx->state_machine) {
-		pr_err("%s: Context is not ready.\n'", __func__);
+		CAM_ERR(CAM_CORE, "Context is not ready");
 		return -EINVAL;
 	}
 
 	if (!cmd) {
-		pr_err("%s: Invalid stop device command payload.\n",
-			__func__);
+		CAM_ERR(CAM_CORE, "Invalid stop device command payload");
 		return -EINVAL;
 	}
 
@@ -315,8 +312,8 @@
 			ctx, cmd);
 	else
 		/* stop device can be optional for some driver */
-		pr_warn("%s: No stop device in dev %d, state %d\n",
-			__func__, ctx->dev_hdl, ctx->state);
+		CAM_WARN(CAM_CORE, "No stop device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
 	mutex_unlock(&ctx->ctx_mutex);
 
 	return rc;
@@ -332,7 +329,7 @@
 
 	/* crm_node_intf is optinal */
 	if (!ctx || !hw_mgr_intf || !req_list) {
-		pr_err("%s: Invalid input parameters\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid input parameters");
 		return -EINVAL;
 	}
 
@@ -375,7 +372,7 @@
 	 * so we just free the memory for the context
 	 */
 	if (ctx->state != CAM_CTX_AVAILABLE)
-		pr_err("%s: Device did not shutdown cleanly.\n", __func__);
+		CAM_ERR(CAM_CORE, "Device did not shutdown cleanly");
 
 	memset(ctx, 0, sizeof(*ctx));
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index e934dff..a430466 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CTXT-UTILS %s:%d " fmt, __func__, __LINE__
-
 #include <linux/debugfs.h>
 #include <linux/videodev2.h>
 #include <linux/slab.h>
@@ -25,6 +23,7 @@
 #include "cam_req_mgr_util.h"
 #include "cam_sync_api.h"
 #include "cam_trace.h"
+#include "cam_debug_util.h"
 
 int cam_context_buf_done_from_hw(struct cam_context *ctx,
 	void *done_event_data, uint32_t bubble_state)
@@ -36,7 +35,7 @@
 		(struct cam_hw_done_event_data *)done_event_data;
 
 	if (list_empty(&ctx->active_req_list)) {
-		pr_err("Buf done with no active request\n");
+		CAM_ERR(CAM_CTXT, "no active request");
 		return -EIO;
 	}
 
@@ -46,13 +45,13 @@
 	trace_cam_buf_done("UTILS", ctx, req);
 
 	if (done->request_id != req->request_id) {
-		pr_err("mismatch: done request [%lld], active request [%lld]\n",
+		CAM_ERR(CAM_CTXT, "mismatch: done req[%lld], active req[%lld]",
 			done->request_id, req->request_id);
 		return -EIO;
 	}
 
 	if (!req->num_out_map_entries) {
-		pr_err("active request with no output fence objects to signal\n");
+		CAM_ERR(CAM_CTXT, "no output fence to signal");
 		return -EIO;
 	}
 
@@ -80,13 +79,13 @@
 	struct cam_hw_config_args cfg;
 
 	if (!ctx->hw_mgr_intf) {
-		pr_err("HW interface is not ready\n");
+		CAM_ERR(CAM_CTXT, "HW interface is not ready");
 		rc = -EFAULT;
 		goto end;
 	}
 
 	if (list_empty(&ctx->pending_req_list)) {
-		pr_err("No available request for Apply id %lld\n",
+		CAM_ERR(CAM_CTXT, "No available request for Apply id %lld",
 			apply->request_id);
 		rc = -EFAULT;
 		goto end;
@@ -103,7 +102,7 @@
 	cfg.num_hw_update_entries = req->num_hw_update_entries;
 	cfg.out_map_entries = req->out_map_entries;
 	cfg.num_out_map_entries = req->num_out_map_entries;
-	cfg.priv = (void *)&req->request_id;
+	cfg.priv = req->req_priv;
 	list_add_tail(&req->list, &ctx->active_req_list);
 
 	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -127,7 +126,7 @@
 	spin_unlock(&ctx->lock);
 
 	if (!req) {
-		pr_err("No more request obj free\n");
+		CAM_ERR(CAM_CTXT, "No more request obj free");
 		return;
 	}
 
@@ -146,7 +145,7 @@
 	struct cam_ctx_request *req;
 
 	if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) {
-		pr_err("HW interface is not ready\n");
+		CAM_ERR(CAM_CTXT, "HW interface is not ready");
 		return -EINVAL;
 	}
 
@@ -168,7 +167,7 @@
 		req = list_first_entry(&ctx->active_req_list,
 			struct cam_ctx_request, list);
 		list_del_init(&req->list);
-		pr_debug("signal fence in active list. fence num %d\n",
+		CAM_DBG(CAM_CTXT, "signal fence in active list, num %d",
 			req->num_out_map_entries);
 		for (i = 0; i < req->num_out_map_entries; i++) {
 			if (req->out_map_entries[i].sync_id > 0)
@@ -187,7 +186,7 @@
 				cam_sync_deregister_callback(
 					cam_context_sync_callback, ctx,
 					req->in_map_entries[i].sync_id);
-		pr_debug("signal out fence in pending list. fence num %d\n",
+		CAM_DBG(CAM_CTXT, "signal fence in pending list, num %d",
 			req->num_out_map_entries);
 		for (i = 0; i < req->num_out_map_entries; i++)
 			if (req->out_map_entries[i].sync_id > 0)
@@ -211,7 +210,7 @@
 	int32_t i = 0;
 
 	if (!ctx->hw_mgr_intf) {
-		pr_err("HW interface is not ready\n");
+		CAM_ERR(CAM_CTXT, "HW interface is not ready");
 		rc = -EFAULT;
 		goto end;
 	}
@@ -225,7 +224,7 @@
 	spin_unlock(&ctx->lock);
 
 	if (!req) {
-		pr_err("No more request obj free\n");
+		CAM_ERR(CAM_CTXT, "No more request obj free");
 		rc = -ENOMEM;
 		goto end;
 	}
@@ -239,20 +238,12 @@
 		(uint64_t *) &packet_addr,
 		&len);
 	if (rc != 0) {
-		pr_err("Can not get packet address\n");
+		CAM_ERR(CAM_CTXT, "Can not get packet address");
 		rc = -EINVAL;
 		goto free_req;
 	}
 
 	packet = (struct cam_packet *) (packet_addr + cmd->offset);
-	pr_debug("pack_handle %llx\n", cmd->packet_handle);
-	pr_debug("packet address is 0x%llx\n", packet_addr);
-	pr_debug("packet with length %zu, offset 0x%llx\n",
-		len, cmd->offset);
-	pr_debug("Packet request id 0x%llx\n",
-		packet->header.request_id);
-	pr_debug("Packet size 0x%x\n", packet->header.size);
-	pr_debug("packet op %d\n", packet->header.op_code);
 
 	/* preprocess the configuration */
 	memset(&cfg, 0, sizeof(cfg));
@@ -269,7 +260,7 @@
 	rc = ctx->hw_mgr_intf->hw_prepare_update(
 		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
 	if (rc != 0) {
-		pr_err("Prepare config packet failed in HW layer\n");
+		CAM_ERR(CAM_CTXT, "Prepare config packet failed in HW layer");
 		rc = -EFAULT;
 		goto free_req;
 	}
@@ -289,7 +280,7 @@
 					cam_context_sync_callback,
 					(void *)ctx,
 					req->in_map_entries[i].sync_id);
-			pr_debug("register in fence callback: %d ret = %d\n",
+			CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
 				req->in_map_entries[i].sync_id, rc);
 		}
 		goto end;
@@ -302,7 +293,6 @@
 	list_add_tail(&req->list, &ctx->free_req_list);
 	spin_unlock(&ctx->lock);
 end:
-	pr_debug("Config dev successful\n");
 	return rc;
 }
 
@@ -315,25 +305,24 @@
 	struct cam_hw_release_args release;
 
 	if (!ctx->hw_mgr_intf) {
-		pr_err("HW interface is not ready\n");
+		CAM_ERR(CAM_CTXT, "HW interface is not ready");
 		rc = -EFAULT;
 		goto end;
 	}
 
-	pr_debug("acquire cmd: session_hdl 0x%x, num_resources %d\n",
-		cmd->session_handle, cmd->num_resources);
-	pr_debug(" handle type %d, res %lld\n", cmd->handle_type,
+	CAM_DBG(CAM_CTXT, "ses hdl: %x, num_res: %d, type: %d, res: %lld",
+		cmd->session_handle, cmd->num_resources, cmd->handle_type,
 		cmd->resource_hdl);
 
 	if (cmd->num_resources > CAM_CTX_RES_MAX) {
-		pr_err("Too much resources in the acquire\n");
+		CAM_ERR(CAM_CTXT, "resource limit exceeded");
 		rc = -ENOMEM;
 		goto end;
 	}
 
 	/* for now we only support user pointer */
 	if (cmd->handle_type != 1)  {
-		pr_err("Only user pointer is supported");
+		CAM_ERR(CAM_CTXT, "Only user pointer is supported");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -344,15 +333,11 @@
 	param.num_acq = cmd->num_resources;
 	param.acquire_info = cmd->resource_hdl;
 
-	pr_debug("ctx %pK: acquire hw resource: hw_intf: 0x%pK, priv 0x%pK",
-		ctx, ctx->hw_mgr_intf, ctx->hw_mgr_intf->hw_mgr_priv);
-	pr_debug("acquire_hw_func 0x%pK\n", ctx->hw_mgr_intf->hw_acquire);
-
 	/* call HW manager to reserve the resource */
 	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
 		&param);
 	if (rc != 0) {
-		pr_err("Acquire device failed\n");
+		CAM_ERR(CAM_CTXT, "Acquire device failed");
 		goto end;
 	}
 
@@ -365,11 +350,10 @@
 	req_hdl_param.media_entity_flag = 0;
 	req_hdl_param.priv = ctx;
 
-	pr_debug("get device handle from bridge\n");
 	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
 	if (ctx->dev_hdl <= 0) {
 		rc = -EFAULT;
-		pr_err("Can not create device handle\n");
+		CAM_ERR(CAM_CTXT, "Can not create device handle");
 		goto free_hw;
 	}
 	cmd->dev_handle = ctx->dev_hdl;
@@ -377,7 +361,6 @@
 	/* store session information */
 	ctx->session_hdl = cmd->session_handle;
 
-	pr_err("dev_handle = %x\n", cmd->dev_handle);
 	return rc;
 
 free_hw:
@@ -395,14 +378,14 @@
 	struct cam_hw_start_args arg;
 
 	if (!ctx->hw_mgr_intf) {
-		pr_err("HW interface is not ready\n");
+		CAM_ERR(CAM_CTXT, "HW interface is not ready");
 		rc = -EFAULT;
 		goto end;
 	}
 
 	if ((cmd->session_handle != ctx->session_hdl) ||
 		(cmd->dev_handle != ctx->dev_hdl)) {
-		pr_err("Invalid session hdl[%d], dev_handle[%d]\n",
+		CAM_ERR(CAM_CTXT, "Invalid session hdl[%d], dev_handle[%d]",
 			cmd->session_handle, cmd->dev_handle);
 		rc = -EPERM;
 		goto end;
@@ -413,12 +396,11 @@
 				&arg);
 		if (rc) {
 			/* HW failure. user need to clean up the resource */
-			pr_err("Start HW failed\n");
+			CAM_ERR(CAM_CTXT, "Start HW failed");
 			goto end;
 		}
 	}
 
-	pr_debug("start device success\n");
 end:
 	return rc;
 }
@@ -431,7 +413,7 @@
 	struct cam_ctx_request *req;
 
 	if (!ctx->hw_mgr_intf) {
-		pr_err("HW interface is not ready\n");
+		CAM_ERR(CAM_CTXT, "HW interface is not ready");
 		rc = -EFAULT;
 		goto end;
 	}
@@ -449,7 +431,7 @@
 		req = list_first_entry(&ctx->pending_req_list,
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
-		pr_debug("signal fence in pending list. fence num %d\n",
+		CAM_DBG(CAM_CTXT, "signal fence in pending list. fence num %d",
 			req->num_out_map_entries);
 		for (i = 0; i < req->num_out_map_entries; i++)
 			if (req->out_map_entries[i].sync_id != -1)
@@ -462,7 +444,7 @@
 		req = list_first_entry(&ctx->active_req_list,
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
-		pr_debug("signal fence in active list. fence num %d\n",
+		CAM_DBG(CAM_CTXT, "signal fence in active list. fence num %d",
 			req->num_out_map_entries);
 		for (i = 0; i < req->num_out_map_entries; i++)
 			if (req->out_map_entries[i].sync_id != -1)
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index fa26ea0..043f44d 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -16,7 +16,7 @@
 
 #include "cam_node.h"
 #include "cam_trace.h"
-
+#include "cam_debug_util.h"
 static void  __cam_node_handle_shutdown(struct cam_node *node)
 {
 	if (node->hw_mgr_intf.hw_close)
@@ -30,7 +30,7 @@
 	int rc = -EFAULT;
 
 	if (!query) {
-		pr_err("%s: Invalid params\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid params");
 		return -EINVAL;
 	}
 
@@ -65,7 +65,7 @@
 
 	rc = cam_context_handle_acquire_dev(ctx, acquire);
 	if (rc) {
-		pr_err("%s: Acquire device failed\n", __func__);
+		CAM_ERR(CAM_CORE, "Acquire device failed");
 		goto free_ctx;
 	}
 
@@ -87,19 +87,19 @@
 		return -EINVAL;
 
 	if (start->dev_handle <= 0) {
-		pr_err("Invalid device handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (start->session_handle <= 0) {
-		pr_err("Invalid session handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, start->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			start->dev_handle);
 		return -EINVAL;
 	}
 
@@ -115,19 +115,19 @@
 		return -EINVAL;
 
 	if (stop->dev_handle <= 0) {
-		pr_err("Invalid device handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (stop->session_handle <= 0) {
-		pr_err("Invalid session handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, stop->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			stop->dev_handle);
 		return -EINVAL;
 	}
 
@@ -143,19 +143,19 @@
 		return -EINVAL;
 
 	if (config->dev_handle <= 0) {
-		pr_err("Invalid device handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (config->session_handle <= 0) {
-		pr_err("Invalid session handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, config->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			config->dev_handle);
 		return -EINVAL;
 	}
 
@@ -172,29 +172,29 @@
 		return -EINVAL;
 
 	if (release->dev_handle <= 0) {
-		pr_err("Invalid device handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
 		return -EINVAL;
 	}
 
 	if (release->session_handle <= 0) {
-		pr_err("Invalid session handle for context\n");
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, release->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			release->dev_handle);
 		return -EINVAL;
 	}
 
 	rc = cam_context_handle_release_dev(ctx, release);
 	if (rc)
-		pr_err("%s: context release failed\n", __func__);
+		CAM_ERR(CAM_CORE, "context release failed");
 
 	rc = cam_destroy_device_hdl(release->dev_handle);
 	if (rc)
-		pr_err("%s: destroy device handle is failed\n", __func__);
+		CAM_ERR(CAM_CORE, "destroy device handle failed");
 
 	mutex_lock(&node->list_mutex);
 	list_add_tail(&ctx->list, &node->free_ctx_list);
@@ -211,8 +211,8 @@
 
 	ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
 	if (!ctx) {
-		pr_err("%s: Can not get context  for handle %d\n",
-			__func__, info->dev_hdl);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			info->dev_hdl);
 		return -EINVAL;
 	}
 	return cam_context_handle_crm_get_dev_info(ctx, info);
@@ -229,8 +229,8 @@
 
 	ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, setup->dev_hdl);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			setup->dev_hdl);
 		return -EINVAL;
 	}
 
@@ -251,8 +251,8 @@
 
 	ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, apply->dev_hdl);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			apply->dev_hdl);
 		return -EINVAL;
 	}
 
@@ -266,14 +266,14 @@
 	struct cam_context *ctx = NULL;
 
 	if (!flush) {
-		pr_err("%s: Invalid flush request payload\n", __func__);
+		CAM_ERR(CAM_CORE, "Invalid flush request payload");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
 	if (!ctx) {
-		pr_err("%s: Can not get context for handle %d\n",
-			__func__, flush->dev_hdl);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_hdl);
 		return -EINVAL;
 	}
 
@@ -285,7 +285,7 @@
 	if (node)
 		memset(node, 0, sizeof(*node));
 
-	pr_debug("%s: deinit complete!\n", __func__);
+	CAM_DBG(CAM_CORE, "deinit complete");
 
 	return 0;
 }
@@ -317,8 +317,8 @@
 	node->ctx_size = ctx_size;
 	for (i = 0; i < ctx_size; i++) {
 		if (!ctx_list[i].state_machine) {
-			pr_err("%s: camera context %d is not initialized!",
-				__func__, i);
+			CAM_ERR(CAM_CORE,
+				"camera context %d is not initialized", i);
 			rc = -1;
 			goto err;
 		}
@@ -328,7 +328,7 @@
 
 	node->state = CAM_NODE_STATE_INIT;
 err:
-	pr_debug("%s: Exit. (rc = %d)\n", __func__, rc);
+	CAM_DBG(CAM_CORE, "Exit. (rc = %d)", rc);
 	return rc;
 }
 
@@ -339,7 +339,7 @@
 	if (!cmd)
 		return -EINVAL;
 
-	pr_debug("%s: handle cmd %d\n", __func__, cmd->op_code);
+	CAM_DBG(CAM_CORE, "handle cmd %d", cmd->op_code);
 
 	switch (cmd->op_code) {
 	case CAM_QUERY_CAP: {
@@ -353,8 +353,8 @@
 
 		rc = __cam_node_handle_query_cap(node, &query);
 		if (rc) {
-			pr_err("%s: querycap is failed(rc = %d)\n",
-				__func__,  rc);
+			CAM_ERR(CAM_CORE, "querycap failed (rc = %d)",
+				rc);
 			break;
 		}
 
@@ -374,8 +374,8 @@
 		}
 		rc = __cam_node_handle_acquire_dev(node, &acquire);
 		if (rc) {
-			pr_err("%s: acquire device failed(rc = %d)\n",
-				__func__, rc);
+			CAM_ERR(CAM_CORE, "acquire device failed (rc = %d)",
+				rc);
 			break;
 		}
 		if (copy_to_user((void __user *)cmd->handle, &acquire,
@@ -392,8 +392,8 @@
 		else {
 			rc = __cam_node_handle_start_dev(node, &start);
 			if (rc)
-				pr_err("%s: start device failed(rc = %d)\n",
-					__func__, rc);
+				CAM_ERR(CAM_CORE,
+					"start device failed (rc = %d)", rc);
 		}
 		break;
 	}
@@ -406,8 +406,8 @@
 		else {
 			rc = __cam_node_handle_stop_dev(node, &stop);
 			if (rc)
-				pr_err("%s: stop device failed(rc = %d)\n",
-					__func__, rc);
+				CAM_ERR(CAM_CORE,
+					"stop device failed (rc = %d)", rc);
 		}
 		break;
 	}
@@ -420,8 +420,8 @@
 		else {
 			rc = __cam_node_handle_config_dev(node, &config);
 			if (rc)
-				pr_err("%s: config device failed(rc = %d)\n",
-					__func__, rc);
+				CAM_ERR(CAM_CORE,
+					"config device failed (rc = %d)", rc);
 		}
 		break;
 	}
@@ -434,8 +434,8 @@
 		else {
 			rc = __cam_node_handle_release_dev(node, &release);
 			if (rc)
-				pr_err("%s: release device failed(rc = %d)\n",
-					__func__, rc);
+				CAM_ERR(CAM_CORE,
+					"release device failed (rc = %d)", rc);
 		}
 		break;
 	}
@@ -443,7 +443,7 @@
 		__cam_node_handle_shutdown(node);
 		break;
 	default:
-		pr_err("Unknown op code %d\n", cmd->op_code);
+		CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
 		rc = -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
index 8664ce8..d690508 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -12,6 +12,7 @@
 
 #include "cam_subdev.h"
 #include "cam_node.h"
+#include "cam_debug_util.h"
 
 /**
  * cam_subdev_subscribe_event()
@@ -63,7 +64,7 @@
 			(struct cam_control *) arg);
 		break;
 	default:
-		pr_err("Invalid command %d for %s!\n", cmd,
+		CAM_ERR(CAM_CORE, "Invalid command %d for %s", cmd,
 			node->name);
 		rc = -EINVAL;
 	}
@@ -80,7 +81,7 @@
 
 	if (copy_from_user(&cmd_data, (void __user *)arg,
 		sizeof(cmd_data))) {
-		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+		CAM_ERR(CAM_CORE, "Failed to copy from user_ptr=%pK size=%zu",
 			(void __user *)arg, sizeof(cmd_data));
 		return -EFAULT;
 	}
@@ -88,7 +89,8 @@
 	if (!rc) {
 		if (copy_to_user((void __user *)arg, &cmd_data,
 			sizeof(cmd_data))) {
-			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+			CAM_ERR(CAM_CORE,
+				"Failed to copy to user_ptr=%pK size=%zu",
 				(void __user *)arg, sizeof(cmd_data));
 			rc = -EFAULT;
 		}
@@ -147,8 +149,8 @@
 
 	rc = cam_register_subdev(sd);
 	if (rc) {
-		pr_err("%s: cam_register_subdev() failed for dev: %s!\n",
-			__func__, sd->name);
+		CAM_ERR(CAM_CORE, "cam_register_subdev() failed for dev: %s",
+			sd->name);
 		goto err;
 	}
 	platform_set_drvdata(sd->pdev, sd);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 813f392..82035e9 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -21,22 +21,6 @@
 #include "cam_cpas_hw_intf.h"
 #include "cam_cpas_soc.h"
 
-int cam_cpas_util_get_string_index(const char **strings,
-	uint32_t num_strings, char *matching_string, uint32_t *index)
-{
-	int i;
-
-	for (i = 0; i < num_strings; i++) {
-		if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
-			CPAS_CDBG("matched %s : %d\n", matching_string, i);
-			*index = i;
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
 int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
 	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
 {
@@ -62,7 +46,7 @@
 		value = reg_info->value;
 	}
 
-	CPAS_CDBG("Base[%d] Offset[0x%8x] Value[0x%8x]\n",
+	CAM_DBG(CAM_CPAS, "Base[%d] Offset[0x%8x] Value[0x%8x]",
 		reg_base, reg_info->offset, value);
 
 	cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
@@ -75,13 +59,13 @@
 	struct cam_cpas_bus_client *bus_client, unsigned int level)
 {
 	if (!bus_client->valid || (bus_client->dyn_vote == true)) {
-		pr_err("Invalid params %d %d\n", bus_client->valid,
+		CAM_ERR(CAM_CPAS, "Invalid params %d %d", bus_client->valid,
 			bus_client->dyn_vote);
 		return -EINVAL;
 	}
 
 	if (level >= bus_client->num_usecases) {
-		pr_err("Invalid vote level=%d, usecases=%d\n", level,
+		CAM_ERR(CAM_CPAS, "Invalid vote level=%d, usecases=%d", level,
 			bus_client->num_usecases);
 		return -EINVAL;
 	}
@@ -89,7 +73,8 @@
 	if (level == bus_client->curr_vote_level)
 		return 0;
 
-	CPAS_CDBG("Bus client[%d] index[%d]\n", bus_client->client_id, level);
+	CAM_DBG(CAM_CPAS, "Bus client[%d] index[%d]", bus_client->client_id,
+		level);
 	msm_bus_scale_client_update_request(bus_client->client_id, level);
 	bus_client->curr_vote_level = level;
 
@@ -104,14 +89,14 @@
 	int idx = 0;
 
 	if (!bus_client->valid) {
-		pr_err("bus client not valid\n");
+		CAM_ERR(CAM_CPAS, "bus client not valid");
 		return -EINVAL;
 	}
 
 	if ((bus_client->num_usecases != 2) ||
 		(bus_client->num_paths != 1) ||
 		(bus_client->dyn_vote != true)) {
-		pr_err("dynamic update not allowed %d %d %d\n",
+		CAM_ERR(CAM_CPAS, "dynamic update not allowed %d %d %d",
 			bus_client->num_usecases, bus_client->num_paths,
 			bus_client->dyn_vote);
 		return -EINVAL;
@@ -120,7 +105,7 @@
 	mutex_lock(&bus_client->lock);
 
 	if (bus_client->curr_vote_level > 1) {
-		pr_err("curr_vote_level %d cannot be greater than 1\n",
+		CAM_ERR(CAM_CPAS, "curr_vote_level %d cannot be greater than 1",
 			bus_client->curr_vote_level);
 		mutex_unlock(&bus_client->lock);
 		return -EINVAL;
@@ -136,7 +121,7 @@
 	path->vectors[0].ab = ab;
 	path->vectors[0].ib = ib;
 
-	CPAS_CDBG("Bus client[%d] :ab[%llu] ib[%llu], index[%d]\n",
+	CAM_DBG(CAM_CPAS, "Bus client[%d] :ab[%llu] ib[%llu], index[%d]",
 		bus_client->client_id, ab, ib, idx);
 	msm_bus_scale_client_update_request(bus_client->client_id, idx);
 
@@ -154,20 +139,20 @@
 	pdata = msm_bus_pdata_from_node(soc_info->pdev,
 		dev_node);
 	if (!pdata) {
-		pr_err("failed get_pdata\n");
+		CAM_ERR(CAM_CPAS, "failed get_pdata");
 		return -EINVAL;
 	}
 
 	if ((pdata->num_usecases == 0) ||
 		(pdata->usecase[0].num_paths == 0)) {
-		pr_err("usecase=%d\n", pdata->num_usecases);
+		CAM_ERR(CAM_CPAS, "usecase=%d", pdata->num_usecases);
 		rc = -EINVAL;
 		goto error;
 	}
 
 	client_id = msm_bus_scale_register_client(pdata);
 	if (!client_id) {
-		pr_err("failed in register ahb bus client\n");
+		CAM_ERR(CAM_CPAS, "failed in register ahb bus client");
 		rc = -EINVAL;
 		goto error;
 	}
@@ -176,7 +161,8 @@
 		"qcom,msm-bus-vector-dyn-vote");
 
 	if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
-		pr_err("Excess or less vectors %d\n", pdata->num_usecases);
+		CAM_ERR(CAM_CPAS, "Excess or less vectors %d",
+			pdata->num_usecases);
 		rc = -EINVAL;
 		goto fail_unregister_client;
 	}
@@ -193,7 +179,7 @@
 	bus_client->valid = true;
 	mutex_init(&bus_client->lock);
 
-	CPAS_CDBG("Bus Client : src=%d, dst=%d, bus_client=%d\n",
+	CAM_DBG(CAM_CPAS, "Bus Client : src=%d, dst=%d, bus_client=%d",
 		bus_client->src, bus_client->dst, bus_client->client_id);
 
 	return 0;
@@ -268,7 +254,7 @@
 	axi_port_list_node = of_find_node_by_name(soc_info->pdev->dev.of_node,
 		"qcom,axi-port-list");
 	if (!axi_port_list_node) {
-		pr_err("Node qcom,axi-port-list not found.\n");
+		CAM_ERR(CAM_CPAS, "Node qcom,axi-port-list not found.");
 		return -EINVAL;
 	}
 
@@ -286,14 +272,15 @@
 			"qcom,axi-port-name", 0,
 			(const char **)&axi_port->axi_port_name);
 		if (rc) {
-			pr_err("failed to read qcom,axi-port-name rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS,
+				"failed to read qcom,axi-port-name rc=%d", rc);
 			goto port_name_fail;
 		}
 
 		axi_port_mnoc_node = of_find_node_by_name(axi_port_node,
 			"qcom,axi-port-mnoc");
 		if (!axi_port_mnoc_node) {
-			pr_err("Node qcom,axi-port-mnoc not found.\n");
+			CAM_ERR(CAM_CPAS, "Node qcom,axi-port-mnoc not found.");
 			rc = -EINVAL;
 			goto mnoc_node_get_fail;
 		}
@@ -308,7 +295,8 @@
 			axi_port_camnoc_node = of_find_node_by_name(
 				axi_port_node, "qcom,axi-port-camnoc");
 			if (!axi_port_camnoc_node) {
-				pr_err("Node qcom,axi-port-camnoc not found\n");
+				CAM_ERR(CAM_CPAS,
+					"Node qcom,axi-port-camnoc not found");
 				rc = -EINVAL;
 				goto camnoc_node_get_fail;
 			}
@@ -358,7 +346,8 @@
 	rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
 		(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
 	if (rc) {
-		pr_err("Failed in AHB vote, enable=%d, rc=%d\n", enable, rc);
+		CAM_ERR(CAM_CPAS, "Failed in AHB vote, enable=%d, rc=%d",
+			enable, rc);
 		return rc;
 	}
 
@@ -375,7 +364,8 @@
 		rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
 			mnoc_bw, 0);
 		if (rc) {
-			pr_err("Failed in mnoc vote, enable=%d, rc=%d\n",
+			CAM_ERR(CAM_CPAS,
+				"Failed in mnoc vote, enable=%d, rc=%d",
 				enable, rc);
 			goto remove_ahb_vote;
 		}
@@ -384,7 +374,8 @@
 			cam_cpas_util_vote_bus_client_bw(
 				&curr_port->camnoc_bus, camnoc_bw, 0);
 			if (rc) {
-				pr_err("Failed in mnoc vote, enable=%d, %d\n",
+				CAM_ERR(CAM_CPAS,
+					"Failed in mnoc vote, enable=%d, %d",
 					enable, rc);
 				cam_cpas_util_vote_bus_client_bw(
 					&curr_port->mnoc_bus, 0, 0);
@@ -446,7 +437,8 @@
 	int rc = 0;
 
 	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
-		pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+		CAM_ERR(CAM_CPAS,
+			"Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
 			reg_base, reg_base_index, soc_info->num_reg_map);
 		return -EINVAL;
 	}
@@ -457,7 +449,7 @@
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		pr_err("client has not started%d\n", client_indx);
+		CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
 		rc = -EPERM;
 		goto unlock_client;
 	}
@@ -489,7 +481,8 @@
 		return -EINVAL;
 
 	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
-		pr_err("Invalid reg_base=%d, reg_base_index=%d, num_map=%d\n",
+		CAM_ERR(CAM_CPAS,
+			"Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
 			reg_base, reg_base_index, soc_info->num_reg_map);
 		return -EINVAL;
 	}
@@ -500,7 +493,7 @@
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		pr_err("client has not started%d\n", client_indx);
+		CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
 		rc = -EPERM;
 		goto unlock_client;
 	}
@@ -531,7 +524,7 @@
 	int rc = 0;
 
 	if (!axi_port) {
-		pr_err("axi port does not exists\n");
+		CAM_ERR(CAM_CPAS, "axi port does not exist");
 		return -EINVAL;
 	}
 
@@ -563,7 +556,8 @@
 	if ((!soc_private->axi_camnoc_based) && (mnoc_bw < camnoc_bw))
 		mnoc_bw = camnoc_bw;
 
-	CPAS_CDBG("axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]\n",
+	CAM_DBG(CAM_CPAS,
+		"axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]",
 		axi_port->mnoc_bus.src, axi_port->mnoc_bus.dst,
 		axi_port->camnoc_bus.src, axi_port->camnoc_bus.dst,
 		camnoc_bw, mnoc_bw);
@@ -571,7 +565,8 @@
 	rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
 		mnoc_bw, 0);
 	if (rc) {
-		pr_err("Failed in mnoc vote ab[%llu] ib[%llu] rc=%d\n",
+		CAM_ERR(CAM_CPAS,
+			"Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
 			mnoc_bw, mnoc_bw, rc);
 		goto unlock_axi_port;
 	}
@@ -580,7 +575,8 @@
 		rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
 			camnoc_bw, 0);
 		if (rc) {
-			pr_err("Failed camnoc vote ab[%llu] ib[%llu] rc=%d\n",
+			CAM_ERR(CAM_CPAS,
+				"Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
 				camnoc_bw, camnoc_bw, rc);
 			goto unlock_axi_port;
 		}
@@ -600,7 +596,8 @@
 
 	if (!axi_vote || ((axi_vote->compressed_bw == 0) &&
 		(axi_vote->uncompressed_bw == 0))) {
-		pr_err("Invalid vote, client_handle=%d\n", client_handle);
+		CAM_ERR(CAM_CPAS, "Invalid vote, client_handle=%d",
+			client_handle);
 		return -EINVAL;
 	}
 
@@ -610,12 +607,13 @@
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		pr_err("client has not started %d\n", client_indx);
+		CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
 		rc = -EPERM;
 		goto unlock_client;
 	}
 
-	CPAS_CDBG("Client[%d] Requested compressed[%llu], uncompressed[%llu]\n",
+	CAM_DBG(CAM_CPAS,
+		"Client[%d] Requested compressed[%llu], uncompressed[%llu]",
 		client_indx, axi_vote->compressed_bw,
 		axi_vote->uncompressed_bw);
 
@@ -640,13 +638,14 @@
 	int i;
 
 	if (!dev || !req_level) {
-		pr_err("Invalid params %pK, %pK\n", dev, req_level);
+		CAM_ERR(CAM_CPAS, "Invalid params %pK, %pK", dev, req_level);
 		return -EINVAL;
 	}
 
 	opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
 	if (IS_ERR(opp)) {
-		pr_err("Error on OPP freq :%ld, %pK\n", corner_freq, opp);
+		CAM_ERR(CAM_CPAS, "Error on OPP freq :%ld, %pK",
+			corner_freq, opp);
 		return -EINVAL;
 	}
 
@@ -656,7 +655,8 @@
 		if (corner == soc_private->vdd_ahb[i].vdd_corner)
 			level = soc_private->vdd_ahb[i].ahb_level;
 
-	CPAS_CDBG("From OPP table : freq=[%ld][%ld], corner=%d, level=%d\n",
+	CAM_DBG(CAM_CPAS,
+		"From OPP table : freq=[%ld][%ld], corner=%d, level=%d",
 		freq, corner_freq, corner, level);
 
 	*req_level = level;
@@ -675,7 +675,7 @@
 	int i, rc = 0;
 
 	if (!ahb_bus_client->valid) {
-		pr_err("AHB Bus client not valid\n");
+		CAM_ERR(CAM_CPAS, "AHB Bus client not valid");
 		return -EINVAL;
 	}
 
@@ -694,7 +694,7 @@
 	mutex_lock(&ahb_bus_client->lock);
 	cpas_client->ahb_level = required_level;
 
-	CPAS_CDBG("Clients required level[%d], curr_level[%d]\n",
+	CAM_DBG(CAM_CPAS, "Clients required level[%d], curr_level[%d]",
 		required_level, ahb_bus_client->curr_vote_level);
 
 	if (required_level == ahb_bus_client->curr_vote_level)
@@ -707,19 +707,20 @@
 			highest_level = cpas_core->cpas_client[i]->ahb_level;
 	}
 
-	CPAS_CDBG("Required highest_level[%d]\n", highest_level);
+	CAM_DBG(CAM_CPAS, "Required highest_level[%d]", highest_level);
 
 	rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
 		highest_level);
 	if (rc) {
-		pr_err("Failed in ahb vote, level=%d, rc=%d\n",
+		CAM_ERR(CAM_CPAS, "Failed in ahb vote, level=%d, rc=%d",
 			highest_level, rc);
 		goto unlock_bus_client;
 	}
 
 	rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, highest_level);
 	if (rc) {
-		pr_err("Failed in scaling clock rate level %d for AHB\n",
+		CAM_ERR(CAM_CPAS,
+			"Failed in scaling clock rate level %d for AHB",
 			highest_level);
 		goto unlock_bus_client;
 	}
@@ -740,7 +741,7 @@
 	int rc = 0;
 
 	if (!ahb_vote || (ahb_vote->vote.level == 0)) {
-		pr_err("Invalid AHB vote, %pK\n", ahb_vote);
+		CAM_ERR(CAM_CPAS, "Invalid AHB vote, %pK", ahb_vote);
 		return -EINVAL;
 	}
 
@@ -750,12 +751,13 @@
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		pr_err("client has not started %d\n", client_indx);
+		CAM_ERR(CAM_CPAS, "client has not started %d", client_indx);
 		rc = -EPERM;
 		goto unlock_client;
 	}
 
-	CPAS_CDBG("client[%d] : type[%d], level[%d], freq[%ld], applied[%d]\n",
+	CAM_DBG(CAM_CPAS,
+		"client[%d] : type[%d], level[%d], freq[%ld], applied[%d]",
 		client_indx, ahb_vote->type, ahb_vote->vote.level,
 		ahb_vote->vote.freq,
 		cpas_core->cpas_client[client_indx]->ahb_level);
@@ -782,12 +784,13 @@
 	int rc;
 
 	if (!hw_priv || !start_args) {
-		pr_err("Invalid arguments %pK %pK\n", hw_priv, start_args);
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, start_args);
 		return -EINVAL;
 	}
 
 	if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
-		pr_err("HW_CAPS size mismatch %ld %d\n",
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
 			sizeof(struct cam_cpas_hw_cmd_start), arg_size);
 		return -EINVAL;
 	}
@@ -804,7 +807,7 @@
 
 	if ((ahb_vote->vote.level == 0) || ((axi_vote->compressed_bw == 0) &&
 		(axi_vote->uncompressed_bw == 0))) {
-		pr_err("Invalid vote ahb[%d], axi[%llu], [%llu]\n",
+		CAM_ERR(CAM_CPAS, "Invalid vote ahb[%d], axi[%llu], [%llu]",
 			ahb_vote->vote.level, axi_vote->compressed_bw,
 			axi_vote->uncompressed_bw);
 		return -EINVAL;
@@ -817,20 +820,20 @@
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
 
 	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
-		pr_err("client is not registered %d\n", client_indx);
+		CAM_ERR(CAM_CPAS, "client is not registered %d", client_indx);
 		rc = -EPERM;
 		goto done;
 	}
 
 	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		pr_err("Client %d is in start state\n", client_indx);
+		CAM_ERR(CAM_CPAS, "Client %d is in start state", client_indx);
 		rc = -EPERM;
 		goto done;
 	}
 
 	cpas_client = cpas_core->cpas_client[client_indx];
 
-	CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
+	CAM_DBG(CAM_CPAS, "AHB :client[%d] type[%d], level[%d], applied[%d]",
 		client_indx, ahb_vote->type, ahb_vote->vote.level,
 		cpas_client->ahb_level);
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
@@ -838,7 +841,8 @@
 	if (rc)
 		goto done;
 
-	CPAS_CDBG("AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]\n",
+	CAM_DBG(CAM_CPAS,
+		"AXI client[%d] compressed_bw[%llu], uncompressed_bw[%llu]",
 		client_indx, axi_vote->compressed_bw,
 		axi_vote->uncompressed_bw);
 	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
@@ -850,7 +854,7 @@
 		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
 			applied_level);
 		if (rc) {
-			pr_err("enable_resorce failed, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "enable_resource failed, rc=%d", rc);
 			goto done;
 		}
 
@@ -859,7 +863,8 @@
 			if (rc) {
 				cam_cpas_soc_disable_resources(
 					&cpas_hw->soc_info);
-				pr_err("failed in power_on settings rc=%d\n",
+				CAM_ERR(CAM_CPAS,
+					"failed in power_on settings rc=%d",
 					rc);
 				goto done;
 			}
@@ -870,7 +875,7 @@
 	cpas_client->started = true;
 	cpas_core->streamon_clients++;
 
-	CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+	CAM_DBG(CAM_CPAS, "client_indx=%d, streamon_clients=%d",
 		client_indx, cpas_core->streamon_clients);
 done:
 	mutex_unlock(&cpas_core->client_mutex[client_indx]);
@@ -892,12 +897,13 @@
 	int rc = 0;
 
 	if (!hw_priv || !stop_args) {
-		pr_err("Invalid arguments %pK %pK\n", hw_priv, stop_args);
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, stop_args);
 		return -EINVAL;
 	}
 
 	if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
-		pr_err("HW_CAPS size mismatch %ld %d\n",
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
 			sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
 		return -EINVAL;
 	}
@@ -913,11 +919,11 @@
 	mutex_lock(&cpas_hw->hw_mutex);
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
 
-	CPAS_CDBG("client_indx=%d, streamon_clients=%d\n",
+	CAM_DBG(CAM_CPAS, "client_indx=%d, streamon_clients=%d",
 		client_indx, cpas_core->streamon_clients);
 
 	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		pr_err("Client %d is not started\n", client_indx);
+		CAM_ERR(CAM_CPAS, "Client %d is not started", client_indx);
 		rc = -EPERM;
 		goto done;
 	}
@@ -930,7 +936,8 @@
 		if (cpas_core->internal_ops.power_off) {
 			rc = cpas_core->internal_ops.power_off(cpas_hw);
 			if (rc) {
-				pr_err("failed in power_off settings rc=%d\n",
+				CAM_ERR(CAM_CPAS,
+					"failed in power_off settings rc=%d",
 					rc);
 				/* Do not return error, passthrough */
 			}
@@ -938,7 +945,7 @@
 
 		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
 		if (rc) {
-			pr_err("disable_resorce failed, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "disable_resource failed, rc=%d", rc);
 			goto done;
 		}
 		cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
@@ -970,12 +977,13 @@
 	int rc = 0;
 
 	if (!hw_priv || !init_hw_args) {
-		pr_err("Invalid arguments %pK %pK\n", hw_priv, init_hw_args);
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, init_hw_args);
 		return -EINVAL;
 	}
 
 	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
-		pr_err("INIT HW size mismatch %ld %d\n",
+		CAM_ERR(CAM_CPAS, "INIT HW size mismatch %ld %d",
 			sizeof(struct cam_cpas_hw_caps), arg_size);
 		return -EINVAL;
 	}
@@ -1002,7 +1010,7 @@
 	struct cam_cpas_private_soc *soc_private =
 		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
 
-	CPAS_CDBG("Register params : identifier=%s, cell_index=%d\n",
+	CAM_DBG(CAM_CPAS, "Register params : identifier=%s, cell_index=%d",
 		register_params->identifier, register_params->cell_index);
 
 	if (soc_private->client_id_based)
@@ -1015,11 +1023,11 @@
 
 	mutex_lock(&cpas_hw->hw_mutex);
 
-	rc = cam_cpas_util_get_string_index(soc_private->client_name,
+	rc = cam_common_util_get_string_index(soc_private->client_name,
 		soc_private->num_clients, client_name, &client_indx);
 	if (rc || !CAM_CPAS_CLIENT_VALID(client_indx) ||
 		CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
-		pr_err("Invalid Client register : %s %d, %d\n",
+		CAM_ERR(CAM_CPAS, "Invalid Client register : %s %d, %d",
 			register_params->identifier,
 			register_params->cell_index, client_indx);
 		mutex_unlock(&cpas_hw->hw_mutex);
@@ -1035,7 +1043,8 @@
 	rc = cam_cpas_util_insert_client_to_axi_port(cpas_core, soc_private,
 		cpas_client, client_indx);
 	if (rc) {
-		pr_err("axi_port_insert failed client_indx=%d, rc=%d\n",
+		CAM_ERR(CAM_CPAS,
+			"axi_port_insert failed client_indx=%d, rc=%d",
 			client_indx, rc);
 		kfree(cpas_client);
 		mutex_unlock(&cpas_hw->hw_mutex);
@@ -1051,7 +1060,7 @@
 
 	mutex_unlock(&cpas_hw->hw_mutex);
 
-	CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+	CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
 		client_indx, cpas_core->registered_clients);
 
 	return 0;
@@ -1071,13 +1080,13 @@
 	mutex_lock(&cpas_core->client_mutex[client_indx]);
 
 	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
-		pr_err("client not registered %d\n", client_indx);
+		CAM_ERR(CAM_CPAS, "client not registered %d", client_indx);
 		rc = -EPERM;
 		goto done;
 	}
 
 	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
-		pr_err("Client %d is not stopped\n", client_indx);
+		CAM_ERR(CAM_CPAS, "Client %d is not stopped", client_indx);
 		rc = -EPERM;
 		goto done;
 	}
@@ -1085,7 +1094,7 @@
 	cam_cpas_util_remove_client_from_axi_port(
 		cpas_core->cpas_client[client_indx]);
 
-	CPAS_CDBG("client_indx=%d, registered_clients=%d\n",
+	CAM_DBG(CAM_CPAS, "client_indx=%d, registered_clients=%d",
 		client_indx, cpas_core->registered_clients);
 
 	kfree(cpas_core->cpas_client[client_indx]);
@@ -1105,12 +1114,13 @@
 	struct cam_cpas_hw_caps *hw_caps;
 
 	if (!hw_priv || !get_hw_cap_args) {
-		pr_err("Invalid arguments %pK %pK\n", hw_priv, get_hw_cap_args);
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, get_hw_cap_args);
 		return -EINVAL;
 	}
 
 	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
-		pr_err("HW_CAPS size mismatch %ld %d\n",
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
 			sizeof(struct cam_cpas_hw_caps), arg_size);
 		return -EINVAL;
 	}
@@ -1132,8 +1142,8 @@
 
 	if (!hw_priv || !cmd_args ||
 		(cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
-		pr_err("Invalid arguments %pK %pK %d\n", hw_priv, cmd_args,
-			cmd_type);
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK %d",
+			hw_priv, cmd_args, cmd_type);
 		return -EINVAL;
 	}
 
@@ -1142,7 +1152,7 @@
 		struct cam_cpas_register_params *register_params;
 
 		if (sizeof(struct cam_cpas_register_params) != arg_size) {
-			pr_err("cmd_type %d, size mismatch %d\n",
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
 				cmd_type, arg_size);
 			break;
 		}
@@ -1155,7 +1165,7 @@
 		uint32_t *client_handle;
 
 		if (sizeof(uint32_t) != arg_size) {
-			pr_err("cmd_type %d, size mismatch %d\n",
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
 				cmd_type, arg_size);
 			break;
 		}
@@ -1169,7 +1179,7 @@
 
 		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
 			arg_size) {
-			pr_err("cmd_type %d, size mismatch %d\n",
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
 				cmd_type, arg_size);
 			break;
 		}
@@ -1186,7 +1196,7 @@
 
 		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
 			arg_size) {
-			pr_err("cmd_type %d, size mismatch %d\n",
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
 				cmd_type, arg_size);
 			break;
 		}
@@ -1203,7 +1213,7 @@
 		struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
 
 		if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
-			pr_err("cmd_type %d, size mismatch %d\n",
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
 				cmd_type, arg_size);
 			break;
 		}
@@ -1217,7 +1227,7 @@
 		struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
 
 		if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
-			pr_err("cmd_type %d, size mismatch %d\n",
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
 				cmd_type, arg_size);
 			break;
 		}
@@ -1228,7 +1238,7 @@
 		break;
 	}
 	default:
-		pr_err("CPAS HW command not valid =%d\n", cmd_type);
+		CAM_ERR(CAM_CPAS, "CPAS HW command not valid =%d", cmd_type);
 		break;
 	}
 
@@ -1274,7 +1284,7 @@
 	rc = of_property_read_string_index(of_node, "arch-compat", 0,
 		(const char **)&compat_str);
 	if (rc) {
-		pr_err("failed to get arch-compat rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed to get arch-compat rc=%d", rc);
 		return -EINVAL;
 	}
 
@@ -1285,7 +1295,7 @@
 		hw_intf->hw_type = CAM_HW_CPASTOP;
 		rc = cam_cpastop_get_internal_ops(internal_ops);
 	} else {
-		pr_err("arch-compat %s not supported\n", compat_str);
+		CAM_ERR(CAM_CPAS, "arch-compat %s not supported", compat_str);
 		rc = -EINVAL;
 	}
 
@@ -1375,7 +1385,7 @@
 
 	rc = cam_cpas_util_client_setup(cpas_hw);
 	if (rc) {
-		pr_err("failed in client setup, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed in client setup, rc=%d", rc);
 		goto deinit_platform_res;
 	}
 
@@ -1383,13 +1393,13 @@
 		cpas_hw->soc_info.pdev->dev.of_node,
 		&cpas_core->ahb_bus_client);
 	if (rc) {
-		pr_err("failed in ahb setup, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed in ahb setup, rc=%d", rc);
 		goto client_cleanup;
 	}
 
 	rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
 	if (rc) {
-		pr_err("failed in axi setup, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed in axi setup, rc=%d", rc);
 		goto ahb_cleanup;
 	}
 
@@ -1400,18 +1410,18 @@
 
 	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, CAM_SVS_VOTE);
 	if (rc) {
-		pr_err("failed in soc_enable_resources, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc);
 		goto remove_default_vote;
 	}
 
 	if (internal_ops->get_hw_info) {
 		rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
 		if (rc) {
-			pr_err("failed in get_hw_info, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "failed in get_hw_info, rc=%d", rc);
 			goto disable_soc_res;
 		}
 	} else {
-		pr_err("Invalid get_hw_info\n");
+		CAM_ERR(CAM_CPAS, "Invalid get_hw_info");
 		goto disable_soc_res;
 	}
 
@@ -1422,7 +1432,7 @@
 
 	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
 	if (rc) {
-		pr_err("failed in soc_disable_resources, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc);
 		goto remove_default_vote;
 	}
 
@@ -1453,7 +1463,7 @@
 	kfree(cpas_core);
 	kfree(cpas_hw);
 	kfree(cpas_hw_intf);
-	pr_err("failed in hw probe\n");
+	CAM_ERR(CAM_CPAS, "failed in hw probe");
 	return rc;
 }
 
@@ -1463,7 +1473,7 @@
 	struct cam_cpas *cpas_core;
 
 	if (!cpas_hw_intf) {
-		pr_err("cpas interface not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas interface not initialized");
 		return -EINVAL;
 	}
 
@@ -1471,7 +1481,7 @@
 	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
 
 	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
-		pr_err("cpas hw is in power up state\n");
+		CAM_ERR(CAM_CPAS, "cpas hw is in power up state");
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index 6d4fafe..bbc99b7 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -15,6 +15,7 @@
 
 #include "cam_cpas_api.h"
 #include "cam_cpas_hw_intf.h"
+#include "cam_common_util.h"
 
 #define CPAS_MAX_CLIENTS 20
 #define CAM_CPAS_INFLIGHT_WORKS 5
@@ -192,7 +193,5 @@
 
 int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
 	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
-int cam_cpas_util_get_string_index(const char **strings,
-	uint32_t num_strings, char *matching_string, uint32_t *index);
 
 #endif /* _CAM_CPAS_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
index 9ee5a43..fa4018e 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -18,16 +18,7 @@
 #include "cam_cpas_api.h"
 #include "cam_hw.h"
 #include "cam_hw_intf.h"
-
-#ifdef CONFIG_CAM_CPAS_DBG
-#define CPAS_CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CPAS_CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-#undef pr_fmt
-#define pr_fmt(fmt) "CAM-CPAS %s:%d " fmt, __func__, __LINE__
-
-#define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+#include "cam_debug_util.h"
 
 /* Number of times to retry while polling */
 #define CAM_CPAS_POLL_RETRY_CNT 5
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index aba0caa..3846784 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -55,12 +55,12 @@
 	struct cam_hw_version *cpas_version)
 {
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
 	if (!camera_family || !camera_version || !cpas_version) {
-		pr_err("invalid input %pK %pK %pK\n", camera_family,
+		CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK", camera_family,
 			camera_version, cpas_version);
 		return -EINVAL;
 	}
@@ -80,7 +80,7 @@
 	int rc;
 
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
@@ -98,9 +98,9 @@
 			CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
 			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
 		if (rc)
-			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
 	} else {
-		pr_err("Invalid process_cmd ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
 		rc = -EINVAL;
 	}
 
@@ -115,12 +115,12 @@
 	int rc;
 
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
 	if (!value) {
-		pr_err("Invalid arg value\n");
+		CAM_ERR(CAM_CPAS, "Invalid arg value");
 		return -EINVAL;
 	}
 
@@ -138,13 +138,13 @@
 			CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
 			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
 		if (rc) {
-			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
 			return rc;
 		}
 
 		*value = cmd_reg_read.value;
 	} else {
-		pr_err("Invalid process_cmd ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
 		rc = -EINVAL;
 	}
 
@@ -158,7 +158,7 @@
 	int rc;
 
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
@@ -173,9 +173,9 @@
 			CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
 			sizeof(struct cam_cpas_hw_cmd_axi_vote));
 		if (rc)
-			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
 	} else {
-		pr_err("Invalid process_cmd ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
 		rc = -EINVAL;
 	}
 
@@ -189,7 +189,7 @@
 	int rc;
 
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
@@ -204,9 +204,9 @@
 			CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
 			sizeof(struct cam_cpas_hw_cmd_ahb_vote));
 		if (rc)
-			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
 	} else {
-		pr_err("Invalid process_cmd ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
 		rc = -EINVAL;
 	}
 
@@ -219,7 +219,7 @@
 	int rc;
 
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
@@ -232,9 +232,9 @@
 			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
 			sizeof(struct cam_cpas_hw_cmd_stop));
 		if (rc)
-			pr_err("Failed in stop, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in stop, rc=%d", rc);
 	} else {
-		pr_err("Invalid stop ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid stop ops");
 		rc = -EINVAL;
 	}
 
@@ -248,7 +248,7 @@
 	int rc;
 
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
@@ -263,9 +263,9 @@
 			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
 			sizeof(struct cam_cpas_hw_cmd_start));
 		if (rc)
-			pr_err("Failed in start, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in start, rc=%d", rc);
 	} else {
-		pr_err("Invalid start ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid start ops");
 		rc = -EINVAL;
 	}
 
@@ -278,7 +278,7 @@
 	int rc;
 
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
@@ -288,9 +288,9 @@
 			CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
 			&client_handle, sizeof(uint32_t));
 		if (rc)
-			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
 	} else {
-		pr_err("Invalid process_cmd ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
 		rc = -EINVAL;
 	}
 
@@ -304,7 +304,7 @@
 	int rc;
 
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
@@ -314,9 +314,9 @@
 			CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
 			sizeof(struct cam_cpas_register_params));
 		if (rc)
-			pr_err("Failed in process_cmd, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
 	} else {
-		pr_err("Invalid process_cmd ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
 		rc = -EINVAL;
 	}
 
@@ -330,7 +330,7 @@
 	int rc = 0;
 
 	if (!cmd) {
-		pr_err("Invalid input cmd\n");
+		CAM_ERR(CAM_CPAS, "Invalid input cmd");
 		return -EINVAL;
 	}
 
@@ -341,7 +341,8 @@
 		rc = copy_from_user(&query, (void __user *) cmd->handle,
 			sizeof(query));
 		if (rc) {
-			pr_err("Failed in copy from user, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d",
+				rc);
 			break;
 		}
 
@@ -353,14 +354,14 @@
 		rc = copy_to_user((void __user *) cmd->handle, &query,
 			sizeof(query));
 		if (rc)
-			pr_err("Failed in copy to user, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc);
 
 		break;
 	}
 	case CAM_SD_SHUTDOWN:
 		break;
 	default:
-		pr_err("Unknown op code %d for CPAS\n", cmd->op_code);
+		CAM_ERR(CAM_CPAS, "Unknown op code %d for CPAS", cmd->op_code);
 		rc = -EINVAL;
 		break;
 	}
@@ -374,13 +375,13 @@
 	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
 
 	if (!cpas_intf || !cpas_intf->probe_done) {
-		pr_err("CPAS not initialized\n");
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
 		return -ENODEV;
 	}
 
 	mutex_lock(&cpas_intf->intf_lock);
 	cpas_intf->open_cnt++;
-	CPAS_CDBG("CPAS Subdev open count %d\n", cpas_intf->open_cnt);
+	CAM_DBG(CAM_CPAS, "CPAS Subdev open count %d", cpas_intf->open_cnt);
 	mutex_unlock(&cpas_intf->intf_lock);
 
 	return 0;
@@ -392,13 +393,13 @@
 	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
 
 	if (!cpas_intf || !cpas_intf->probe_done) {
-		pr_err("CPAS not initialized\n");
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
 		return -ENODEV;
 	}
 
 	mutex_lock(&cpas_intf->intf_lock);
 	cpas_intf->open_cnt--;
-	CPAS_CDBG("CPAS Subdev close count %d\n", cpas_intf->open_cnt);
+	CAM_DBG(CAM_CPAS, "CPAS Subdev close count %d", cpas_intf->open_cnt);
 	mutex_unlock(&cpas_intf->intf_lock);
 
 	return 0;
@@ -411,7 +412,7 @@
 	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
 
 	if (!cpas_intf || !cpas_intf->probe_done) {
-		pr_err("CPAS not initialized\n");
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
 		return -ENODEV;
 	}
 
@@ -420,7 +421,7 @@
 		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
 		break;
 	default:
-		pr_err("Invalid command %d for CPAS!\n", cmd);
+		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
 		rc = -EINVAL;
 		break;
 	}
@@ -437,13 +438,13 @@
 	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
 
 	if (!cpas_intf || !cpas_intf->probe_done) {
-		pr_err("CPAS not initialized\n");
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
 		return -ENODEV;
 	}
 
 	if (copy_from_user(&cmd_data, (void __user *)arg,
 		sizeof(cmd_data))) {
-		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+		CAM_ERR(CAM_CPAS, "Failed to copy from user_ptr=%pK size=%zu",
 			(void __user *)arg, sizeof(cmd_data));
 		return -EFAULT;
 	}
@@ -453,7 +454,7 @@
 		rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
 		break;
 	default:
-		pr_err("Invalid command %d for CPAS!\n", cmd);
+		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
 		rc = -EINVAL;
 		break;
 	}
@@ -461,7 +462,8 @@
 	if (!rc) {
 		if (copy_to_user((void __user *)arg, &cmd_data,
 			sizeof(cmd_data))) {
-			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+			CAM_ERR(CAM_CPAS,
+				"Failed to copy to user_ptr=%pK size=%zu",
 				(void __user *)arg, sizeof(cmd_data));
 			rc = -EFAULT;
 		}
@@ -508,7 +510,8 @@
 
 	rc = cam_register_subdev(subdev);
 	if (rc) {
-		pr_err("failed register subdev: %s!\n", CAM_CPAS_DEV_NAME);
+		CAM_ERR(CAM_CPAS, "failed register subdev: %s!",
+			CAM_CPAS_DEV_NAME);
 		return rc;
 	}
 
@@ -523,7 +526,7 @@
 	int rc;
 
 	if (g_cpas_intf) {
-		pr_err("cpas dev proble already done\n");
+		CAM_ERR(CAM_CPAS, "cpas dev probe already done");
 		return -EALREADY;
 	}
 
@@ -536,7 +539,7 @@
 
 	rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
 	if (rc || (g_cpas_intf->hw_intf == NULL)) {
-		pr_err("Failed in hw probe, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "Failed in hw probe, rc=%d", rc);
 		goto error_destroy_mem;
 	}
 
@@ -546,11 +549,11 @@
 		rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
 			hw_caps, sizeof(struct cam_cpas_hw_caps));
 		if (rc) {
-			pr_err("Failed in get_hw_caps, rc=%d\n", rc);
+			CAM_ERR(CAM_CPAS, "Failed in get_hw_caps, rc=%d", rc);
 			goto error_hw_remove;
 		}
 	} else {
-		pr_err("Invalid get_hw_caps ops\n");
+		CAM_ERR(CAM_CPAS, "Invalid get_hw_caps ops");
 		goto error_hw_remove;
 	}
 
@@ -559,7 +562,8 @@
 		goto error_hw_remove;
 
 	g_cpas_intf->probe_done = true;
-	CPAS_CDBG("CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x\n",
+	CAM_DBG(CAM_CPAS,
+		"CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x",
 		hw_caps->camera_family, hw_caps->camera_version.major,
 		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
 		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
@@ -573,14 +577,14 @@
 	mutex_destroy(&g_cpas_intf->intf_lock);
 	kfree(g_cpas_intf);
 	g_cpas_intf = NULL;
-	pr_err("CPAS probe failed\n");
+	CAM_ERR(CAM_CPAS, "CPAS probe failed");
 	return rc;
 }
 
 static int cam_cpas_dev_remove(struct platform_device *dev)
 {
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
-		pr_err("cpas intf not initialized\n");
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
 		return -ENODEV;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 09c2ae5..f85f461 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -29,7 +29,8 @@
 	int count = 0, i = 0, rc = 0;
 
 	if (!soc_private || !pdev) {
-		pr_err("invalid input arg %pK %pK\n", soc_private, pdev);
+		CAM_ERR(CAM_CPAS, "invalid input arg %pK %pK",
+			soc_private, pdev);
 		return -EINVAL;
 	}
 
@@ -38,7 +39,8 @@
 	rc = of_property_read_string_index(of_node, "arch-compat", 0,
 		(const char **)&soc_private->arch_compat);
 	if (rc) {
-		pr_err("device %s failed to read arch-compat\n", pdev->name);
+		CAM_ERR(CAM_CPAS, "device %s failed to read arch-compat",
+			pdev->name);
 		return rc;
 	}
 
@@ -47,12 +49,13 @@
 
 	count = of_property_count_strings(of_node, "client-names");
 	if (count <= 0) {
-		pr_err("no client-names found\n");
+		CAM_ERR(CAM_CPAS, "no client-names found");
 		count = 0;
 		return -EINVAL;
 	}
 	soc_private->num_clients = count;
-	CPAS_CDBG("arch-compat=%s, client_id_based = %d, num_clients=%d\n",
+	CAM_DBG(CAM_CPAS,
+		"arch-compat=%s, client_id_based = %d, num_clients=%d",
 		soc_private->arch_compat, soc_private->client_id_based,
 		soc_private->num_clients);
 
@@ -60,15 +63,16 @@
 		rc = of_property_read_string_index(of_node,
 			"client-names", i, &soc_private->client_name[i]);
 		if (rc) {
-			pr_err("no client-name at cnt=%d\n", i);
+			CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
 			return -ENODEV;
 		}
-		CPAS_CDBG("Client[%d] : %s\n", i, soc_private->client_name[i]);
+		CAM_DBG(CAM_CPAS, "Client[%d] : %s", i,
+			soc_private->client_name[i]);
 	}
 
 	count = of_property_count_strings(of_node, "client-axi-port-names");
 	if ((count <= 0) || (count != soc_private->num_clients)) {
-		pr_err("incorrect client-axi-port-names info %d %d\n",
+		CAM_ERR(CAM_CPAS, "incorrect client-axi-port-names info %d %d",
 			count, soc_private->num_clients);
 		count = 0;
 		return -EINVAL;
@@ -79,10 +83,10 @@
 			"client-axi-port-names", i,
 			&soc_private->client_axi_port_name[i]);
 		if (rc) {
-			pr_err("no client-name at cnt=%d\n", i);
+			CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
 			return -ENODEV;
 		}
-		CPAS_CDBG("Client AXI Port[%d] : %s\n", i,
+		CAM_DBG(CAM_CPAS, "Client AXI Port[%d] : %s", i,
 			soc_private->client_axi_port_name[i]);
 	}
 
@@ -99,25 +103,29 @@
 			rc = of_property_read_u32_index(of_node, "vdd-corners",
 				i, &soc_private->vdd_ahb[i].vdd_corner);
 			if (rc) {
-				pr_err("vdd-corners failed at index=%d\n", i);
+				CAM_ERR(CAM_CPAS,
+					"vdd-corners failed at index=%d", i);
 				return -ENODEV;
 			}
 
 			rc = of_property_read_string_index(of_node,
 				"vdd-corner-ahb-mapping", i, &ahb_string);
 			if (rc) {
-				pr_err("no ahb-mapping at index=%d\n", i);
+				CAM_ERR(CAM_CPAS,
+					"no ahb-mapping at index=%d", i);
 				return -ENODEV;
 			}
 
 			rc = cam_soc_util_get_level_from_string(ahb_string,
 				&soc_private->vdd_ahb[i].ahb_level);
 			if (rc) {
-				pr_err("invalid ahb-string at index=%d\n", i);
+				CAM_ERR(CAM_CPAS,
+					"invalid ahb-string at index=%d", i);
 				return -EINVAL;
 			}
 
-			CPAS_CDBG("Vdd-AHB mapping [%d] : [%d] [%s] [%d]\n", i,
+			CAM_DBG(CAM_CPAS,
+				"Vdd-AHB mapping [%d] : [%d] [%s] [%d]", i,
 				soc_private->vdd_ahb[i].vdd_corner,
 				ahb_string, soc_private->vdd_ahb[i].ahb_level);
 		}
@@ -135,19 +143,20 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc) {
-		pr_err("failed in get_dt_properties, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed in get_dt_properties, rc=%d", rc);
 		return rc;
 	}
 
 	if (soc_info->irq_line && !irq_handler) {
-		pr_err("Invalid IRQ handler\n");
+		CAM_ERR(CAM_CPAS, "Invalid IRQ handler");
 		return -EINVAL;
 	}
 
 	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
 		irq_data);
 	if (rc) {
-		pr_err("failed in request_platform_resource, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed in request_platform_resource, rc=%d",
+			rc);
 		return rc;
 	}
 
@@ -160,7 +169,7 @@
 
 	rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_info->soc_private);
 	if (rc) {
-		pr_err("failed in get_custom_info, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "failed in get_custom_info, rc=%d", rc);
 		goto free_soc_private;
 	}
 
@@ -179,7 +188,7 @@
 
 	rc = cam_soc_util_release_platform_resource(soc_info);
 	if (rc)
-		pr_err("release platform failed, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "release platform failed, rc=%d", rc);
 
 	kfree(soc_info->soc_private);
 	soc_info->soc_private = NULL;
@@ -195,7 +204,7 @@
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		default_level, true);
 	if (rc)
-		pr_err("enable platform resource failed, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "enable platform resource failed, rc=%d", rc);
 
 	return rc;
 }
@@ -206,7 +215,7 @@
 
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc)
-		pr_err("disable platform failed, rc=%d\n", rc);
+		CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
index 95e26c5..0669070 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -29,13 +29,13 @@
 
 	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
 	hw_caps->camera_version.major =
-		BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
 	hw_caps->camera_version.minor =
-		BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
 	hw_caps->camera_version.incr =
-		BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
 
-	CPAS_CDBG("Family %d, version %d.%d.%d\n",
+	CAM_DBG(CAM_CPAS, "Family %d, version %d.%d.%d",
 		hw_caps->camera_family, hw_caps->camera_version.major,
 		hw_caps->camera_version.minor, hw_caps->camera_version.incr);
 
@@ -49,21 +49,22 @@
 	int rc;
 
 	if (num_reg_map > CAM_CPAS_REG_MAX) {
-		pr_err("invalid num_reg_map=%d\n", num_reg_map);
+		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
 		return -EINVAL;
 	}
 
 	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
-		pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+			soc_info->num_mem_block);
 		return -EINVAL;
 	}
 
-	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
 		soc_info->num_mem_block, "cam_camss", &index);
 	if ((rc == 0) && (index < num_reg_map)) {
 		regbase_index[CAM_CPAS_REG_CAMSS] = index;
 	} else {
-		pr_err("regbase not found for CAM_CPAS_REG_CAMSS\n");
+		CAM_ERR(CAM_CPAS, "regbase not found for CAM_CPAS_REG_CAMSS");
 		return -EINVAL;
 	}
 
@@ -73,7 +74,7 @@
 int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
 {
 	if (!internal_ops) {
-		pr_err("invalid NULL param\n");
+		CAM_ERR(CAM_CPAS, "invalid NULL param");
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index b901410..32ef2e4 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -38,24 +38,24 @@
 
 	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
 	hw_caps->camera_version.major =
-		BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
 	hw_caps->camera_version.minor =
-		BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
 	hw_caps->camera_version.incr =
-		BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
 
 	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
 	hw_caps->cpas_version.major =
-		BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
 	hw_caps->cpas_version.minor =
-		BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
 	hw_caps->cpas_version.incr =
-		BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
 
 	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
 	hw_caps->camera_capability = reg_value;
 
-	CPAS_CDBG("Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x\n",
+	CAM_DBG(CAM_CPAS, "Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x",
 		hw_caps->camera_family, hw_caps->camera_version.major,
 		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
 		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
@@ -71,31 +71,32 @@
 	int rc;
 
 	if (num_reg_map > CAM_CPAS_REG_MAX) {
-		pr_err("invalid num_reg_map=%d\n", num_reg_map);
+		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
 		return -EINVAL;
 	}
 
 	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
-		pr_err("invalid num_mem_block=%d\n", soc_info->num_mem_block);
+		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+			soc_info->num_mem_block);
 		return -EINVAL;
 	}
 
-	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
 		soc_info->num_mem_block, "cam_cpas_top", &index);
 	if ((rc == 0) && (index < num_reg_map)) {
 		regbase_index[CAM_CPAS_REG_CPASTOP] = index;
 	} else {
-		pr_err("regbase not found for CPASTOP, rc=%d, %d %d\n",
+		CAM_ERR(CAM_CPAS, "regbase not found for CPASTOP, rc=%d, %d %d",
 			rc, index, num_reg_map);
 		return -EINVAL;
 	}
 
-	rc = cam_cpas_util_get_string_index(soc_info->mem_block_name,
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
 		soc_info->num_mem_block, "cam_camnoc", &index);
 	if ((rc == 0) && (index < num_reg_map)) {
 		regbase_index[CAM_CPAS_REG_CAMNOC] = index;
 	} else {
-		pr_err("regbase not found for CAMNOC, rc=%d, %d %d\n",
+		CAM_ERR(CAM_CPAS, "regbase not found for CAMNOC, rc=%d, %d %d",
 			rc, index, num_reg_map);
 		return -EINVAL;
 	}
@@ -124,7 +125,8 @@
 		reg_value[3] = cam_io_r_mb(
 			soc_info->reg_map[camnoc_index].mem_base +
 			camnoc_info->error_logger[i + 3]);
-		pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]\n",
+		CAM_ERR(CAM_CPAS,
+			"offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]",
 			camnoc_info->error_logger[i], reg_value[0],
 			reg_value[1], reg_value[2], reg_value[3]);
 	}
@@ -139,7 +141,7 @@
 		reg_value[2] = cam_io_r_mb(
 			soc_info->reg_map[camnoc_index].mem_base +
 			camnoc_info->error_logger[i + 2]);
-		pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x]\n",
+		CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x] [0x%x]",
 			camnoc_info->error_logger[i], reg_value[0],
 			reg_value[1], reg_value[2]);
 		i = i + 3;
@@ -152,7 +154,7 @@
 		reg_value[1] = cam_io_r_mb(
 			soc_info->reg_map[camnoc_index].mem_base +
 			camnoc_info->error_logger[i + 1]);
-		pr_err("offset[0x%x] values [0x%x] [0x%x]\n",
+		CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x] [0x%x]",
 			camnoc_info->error_logger[i], reg_value[0],
 			reg_value[1]);
 		i = i + 2;
@@ -162,7 +164,7 @@
 		reg_value[0] = cam_io_r_mb(
 			soc_info->reg_map[camnoc_index].mem_base +
 			camnoc_info->error_logger[i]);
-		pr_err("offset[0x%x] values [0x%x]\n",
+		CAM_ERR(CAM_CPAS, "offset[0x%x] values [0x%x]",
 			camnoc_info->error_logger[i], reg_value[0]);
 	}
 
@@ -178,7 +180,8 @@
 	reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
 		camnoc_info->irq_err[i].err_status.offset);
 
-	pr_err("Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]\n",
+	CAM_ERR(CAM_CPAS,
+		"Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]",
 		i, camnoc_info->irq_err[i].err_status.offset, reg_value);
 
 	return reg_value;
@@ -186,7 +189,7 @@
 
 static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
 {
-	pr_err("ahb timout error\n");
+	CAM_ERR(CAM_CPAS, "ahb timeout error");
 
 	return 0;
 }
@@ -229,7 +232,8 @@
 	int i;
 	struct cam_cpas_client *cpas_client;
 
-	CPAS_CDBG("Notify CB : num_clients=%d, registered=%d, started=%d\n",
+	CAM_DBG(CAM_CPAS,
+		"Notify CB : num_clients=%d, registered=%d, started=%d",
 		cpas_core->num_clients, cpas_core->registered_clients,
 		cpas_core->streamon_clients);
 
@@ -237,7 +241,8 @@
 		if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
 			cpas_client = cpas_core->cpas_client[i];
 			if (cpas_client->data.cam_cpas_client_cb) {
-				CPAS_CDBG("Calling client CB %d : %d 0x%x\n",
+				CAM_DBG(CAM_CPAS,
+					"Calling client CB %d : %d 0x%x",
 					i, irq_type, irq_data);
 				cpas_client->data.cam_cpas_client_cb(
 					cpas_client->data.client_handle,
@@ -261,7 +266,7 @@
 
 	payload = container_of(work, struct cam_cpas_work_payload, work);
 	if (!payload) {
-		pr_err("NULL payload");
+		CAM_ERR(CAM_CPAS, "NULL payload");
 		return;
 	}
 
@@ -273,7 +278,7 @@
 		if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
 			(camnoc_info->irq_err[i].enable)) {
 			irq_type = camnoc_info->irq_err[i].irq_type;
-			pr_err("Error occurred, type=%d\n", irq_type);
+			CAM_ERR(CAM_CPAS, "Error occurred, type=%d", irq_type);
 			irq_data = 0;
 
 			switch (irq_type) {
@@ -293,10 +298,10 @@
 					cpas_hw);
 				break;
 			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
-				CPAS_CDBG("TEST IRQ\n");
+				CAM_DBG(CAM_CPAS, "TEST IRQ");
 				break;
 			default:
-				pr_err("Invalid IRQ type\n");
+				CAM_ERR(CAM_CPAS, "Invalid IRQ type");
 				break;
 			}
 
@@ -309,7 +314,7 @@
 	}
 
 	if (payload->irq_status)
-		pr_err("IRQ not handled irq_status=0x%x\n",
+		CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
 			payload->irq_status);
 
 	kfree(payload);
@@ -331,7 +336,7 @@
 		soc_info->reg_map[camnoc_index].mem_base +
 		camnoc_info->irq_sbm->sbm_status.offset);
 
-	CPAS_CDBG("IRQ callback, irq_status=0x%x\n", payload->irq_status);
+	CAM_DBG(CAM_CPAS, "IRQ callback, irq_status=0x%x", payload->irq_status);
 
 	payload->hw = cpas_hw;
 	INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
@@ -396,7 +401,8 @@
 			CAM_CPAS_POLL_RETRY_CNT,
 			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
 		if (rc) {
-			pr_err("camnoc flush slave pending trans failed\n");
+			CAM_ERR(CAM_CPAS,
+				"camnoc flush slave pending trans failed");
 			/* Do not return error, passthrough */
 		}
 	}
@@ -415,14 +421,14 @@
 			(hw_caps->cpas_version.incr == 0)) {
 			camnoc_info = &cam170_cpas100_camnoc_info;
 		} else {
-			pr_err("CPAS Version not supported %d.%d.%d\n",
+			CAM_ERR(CAM_CPAS, "CPAS Version not supported %d.%d.%d",
 				hw_caps->cpas_version.major,
 				hw_caps->cpas_version.minor,
 				hw_caps->cpas_version.incr);
 			return -EINVAL;
 		}
 	} else {
-		pr_err("Camera Version not supported %d.%d.%d\n",
+		CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
 			hw_caps->camera_version.major,
 			hw_caps->camera_version.minor,
 			hw_caps->camera_version.incr);
@@ -435,7 +441,7 @@
 int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
 {
 	if (!internal_ops) {
-		pr_err("invalid NULL param\n");
+		CAM_ERR(CAM_CPAS, "invalid NULL param");
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index 801d09d..3977b68 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -94,7 +94,7 @@
 	struct device  *dev;
 	void           *userdata;
 	void          (*cam_cpas_client_cb)(
-			int32_t                   client_handle,
+			uint32_t                  client_handle,
 			void                     *userdata,
 			enum cam_camnoc_irq_type  event_type,
 			uint32_t                  event_data);
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 776847d..15bd98c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-ICP-CTXT %s:%d " fmt, __func__, __LINE__
-
 #include <linux/debugfs.h>
 #include <linux/videodev2.h>
 #include <linux/slab.h>
@@ -26,6 +24,7 @@
 #include "cam_req_mgr_util.h"
 #include "cam_mem_mgr.h"
 #include "cam_trace.h"
+#include "cam_debug_util.h"
 
 static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
 	struct cam_acquire_dev_cmd *cmd)
@@ -48,7 +47,7 @@
 
 	rc = cam_context_release_dev_to_hw(ctx, cmd);
 	if (rc)
-		pr_err("Unable to release device\n");
+		CAM_ERR(CAM_ICP, "Unable to release device");
 
 	ctx->state = CAM_CTX_AVAILABLE;
 	trace_cam_context_state("ICP", ctx);
@@ -76,7 +75,7 @@
 
 	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
 	if (rc)
-		pr_err("Unable to prepare device\n");
+		CAM_ERR(CAM_ICP, "Failed to prepare device");
 
 	return rc;
 }
@@ -88,7 +87,7 @@
 
 	rc = cam_context_stop_dev_to_hw(ctx);
 	if (rc)
-		pr_err("Unable to stop device\n");
+		CAM_ERR(CAM_ICP, "Failed to stop device");
 
 	ctx->state = CAM_CTX_ACQUIRED;
 	trace_cam_context_state("ICP", ctx);
@@ -102,11 +101,11 @@
 
 	rc = __cam_icp_stop_dev_in_ready(ctx, NULL);
 	if (rc)
-		pr_err("Unable to stop device\n");
+		CAM_ERR(CAM_ICP, "Failed to stop device");
 
 	rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
 	if (rc)
-		pr_err("Unable to stop device\n");
+		CAM_ERR(CAM_ICP, "Failed to release device");
 
 	return rc;
 }
@@ -167,7 +166,7 @@
 	int rc;
 
 	if ((!ctx) || (!ctx->base) || (!hw_intf)) {
-		pr_err("Invalid params: %pK %pK\n", ctx, hw_intf);
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", ctx, hw_intf);
 		rc = -EINVAL;
 		goto err;
 	}
@@ -175,7 +174,7 @@
 	rc = cam_context_init(ctx->base, NULL, hw_intf, ctx->req_base,
 		CAM_CTX_REQ_MAX);
 	if (rc) {
-		pr_err("Camera Context Base init failed!\n");
+		CAM_ERR(CAM_ICP, "Camera Context Base init failed");
 		goto err;
 	}
 
@@ -190,7 +189,7 @@
 int cam_icp_context_deinit(struct cam_icp_context *ctx)
 {
 	if ((!ctx) || (!ctx->base)) {
-		pr_err("Invalid params: %pK\n", ctx);
+		CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx);
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 69c2e03..bbdff27 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-ICP %s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -36,6 +34,7 @@
 #include "cam_icp_context.h"
 #include "cam_hw_mgr_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
 
 #define CAM_ICP_DEV_NAME        "cam-icp"
 
@@ -65,13 +64,13 @@
 
 	mutex_lock(&g_icp_dev.icp_lock);
 	if (g_icp_dev.open_cnt >= 1) {
-		pr_err("ICP subdev is already opened\n");
+		CAM_ERR(CAM_ICP, "ICP subdev is already opened");
 		rc = -EALREADY;
 		goto end;
 	}
 
 	if (!node) {
-		pr_err("Invalid args\n");
+		CAM_ERR(CAM_ICP, "Invalid args");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -79,7 +78,7 @@
 	hw_mgr_intf = &node->hw_mgr_intf;
 	rc = hw_mgr_intf->download_fw(hw_mgr_intf->hw_mgr_priv, NULL);
 	if (rc < 0) {
-		pr_err("FW download failed\n");
+		CAM_ERR(CAM_ICP, "FW download failed");
 		goto end;
 	}
 	g_icp_dev.open_cnt++;
@@ -97,27 +96,27 @@
 
 	mutex_lock(&g_icp_dev.icp_lock);
 	if (g_icp_dev.open_cnt <= 0) {
-		pr_err("ICP subdev is already closed\n");
+		CAM_ERR(CAM_ICP, "ICP subdev is already closed");
 		rc = -EINVAL;
 		goto end;
 	}
 	g_icp_dev.open_cnt--;
 	if (!node) {
-		pr_err("Invalid args\n");
+		CAM_ERR(CAM_ICP, "Invalid args");
 		rc = -EINVAL;
 		goto end;
 	}
 
 	hw_mgr_intf = &node->hw_mgr_intf;
 	if (!hw_mgr_intf) {
-		pr_err("hw_mgr_intf is not initialized\n");
+		CAM_ERR(CAM_ICP, "hw_mgr_intf is not initialized");
 		rc = -EINVAL;
 		goto end;
 	}
 
 	rc = hw_mgr_intf->hw_close(hw_mgr_intf->hw_mgr_priv, NULL);
 	if (rc < 0) {
-		pr_err("HW close failed\n");
+		CAM_ERR(CAM_ICP, "HW close failed");
 		goto end;
 	}
 
@@ -138,7 +137,7 @@
 	struct cam_hw_mgr_intf *hw_mgr_intf;
 
 	if (!pdev) {
-		pr_err("pdev is NULL\n");
+		CAM_ERR(CAM_ICP, "pdev is NULL");
 		return -EINVAL;
 	}
 
@@ -147,7 +146,7 @@
 	rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
 		CAM_ICP_DEVICE_TYPE);
 	if (rc) {
-		pr_err("ICP cam_subdev_probe failed!\n");
+		CAM_ERR(CAM_ICP, "ICP cam_subdev_probe failed");
 		goto probe_fail;
 	}
 
@@ -161,26 +160,24 @@
 
 	rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf);
 	if (rc) {
-		pr_err("ICP HW manager init failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
 		goto hw_init_fail;
 	}
 
-	pr_debug("Initializing the ICP contexts\n");
 	for (i = 0; i < CAM_CTX_MAX; i++) {
 		g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
 		rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
 					hw_mgr_intf);
 		if (rc) {
-			pr_err("ICP context init failed!\n");
+			CAM_ERR(CAM_ICP, "ICP context init failed");
 			goto ctx_fail;
 		}
 	}
 
-	pr_debug("Initializing the ICP Node\n");
 	rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
 				CAM_CTX_MAX, CAM_ICP_DEV_NAME);
 	if (rc) {
-		pr_err("ICP node init failed!\n");
+		CAM_ERR(CAM_ICP, "ICP node init failed");
 		goto ctx_fail;
 	}
 
@@ -207,20 +204,20 @@
 	struct cam_subdev *subdev;
 
 	if (!pdev) {
-		pr_err("pdev is NULL\n");
-		return -EINVAL;
+		CAM_ERR(CAM_ICP, "pdev is NULL");
+		return -ENODEV;
 	}
 
 	sd = platform_get_drvdata(pdev);
 	if (!sd) {
-		pr_err("V4l2 subdev is NULL\n");
-		return -EINVAL;
+		CAM_ERR(CAM_ICP, "V4l2 subdev is NULL");
+		return -ENODEV;
 	}
 
 	subdev = v4l2_get_subdevdata(sd);
 	if (!subdev) {
-		pr_err("cam subdev is NULL\n");
-		return -EINVAL;
+		CAM_ERR(CAM_ICP, "cam subdev is NULL");
+		return -ENODEV;
 	}
 
 	for (i = 0; i < CAM_CTX_MAX; i++)
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index b763a39..48e1f1c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "HFI-FW %s:%d " fmt, __func__, __LINE__
-
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -27,6 +25,7 @@
 #include "hfi_session_defs.h"
 #include "hfi_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
 
 #define HFI_VERSION_INFO_MAJOR_VAL  1
 #define HFI_VERSION_INFO_MINOR_VAL  1
@@ -39,11 +38,10 @@
 #define HFI_VERSION_INFO_STEP_BMSK   0xFF
 #define HFI_VERSION_INFO_STEP_SHFT  0
 
-#undef  HFI_DBG
-#define HFI_DBG(fmt, args...) pr_debug(fmt, ##args)
-
 static struct hfi_info *g_hfi;
 unsigned int g_icp_mmu_hdl;
+static DEFINE_MUTEX(hfi_cmd_q_mutex);
+static DEFINE_MUTEX(hfi_msg_q_mutex);
 
 int hfi_write_cmd(void *cmd_ptr)
 {
@@ -52,23 +50,25 @@
 	struct hfi_qtbl *q_tbl;
 	struct hfi_q_hdr *q;
 	int rc = 0;
-	int i = 0;
 
 	if (!cmd_ptr) {
-		pr_err("Invalid args\n");
+		CAM_ERR(CAM_HFI, "command is null");
 		return -EINVAL;
 	}
 
-	if (!g_hfi || (g_hfi->hfi_state != HFI_READY)) {
-		pr_err("HFI interface not ready yet\n");
-		return -EIO;
+	mutex_lock(&hfi_cmd_q_mutex);
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "HFI interface not setup");
+		rc = -ENODEV;
+		goto err;
 	}
 
-	mutex_lock(&g_hfi->cmd_q_lock);
-	if (!g_hfi->cmd_q_state) {
-		pr_err("HFI command interface not ready yet\n");
-		mutex_unlock(&g_hfi->cmd_q_lock);
-		return -EIO;
+	if (g_hfi->hfi_state != HFI_READY ||
+		!g_hfi->cmd_q_state) {
+		CAM_ERR(CAM_HFI, "HFI state: %u, cmd q state: %u",
+			g_hfi->hfi_state, g_hfi->cmd_q_state);
+		rc = -ENODEV;
+		goto err;
 	}
 
 	q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
@@ -78,24 +78,20 @@
 
 	size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT;
 	if (!size_in_words) {
-		pr_debug("failed");
+		CAM_DBG(CAM_HFI, "failed");
 		rc = -EINVAL;
 		goto err;
 	}
 
-	HFI_DBG("size_in_words : %u, q->qhdr_write_idx %x\n", size_in_words,
-		q->qhdr_write_idx);
-
 	read_idx = q->qhdr_read_idx;
 	empty_space = (q->qhdr_write_idx >= read_idx) ?
 		(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
 		(read_idx - q->qhdr_write_idx);
 	if (empty_space <= size_in_words) {
-		pr_err("failed");
+		CAM_ERR(CAM_HFI, "failed");
 		rc = -EIO;
 		goto err;
 	}
-	HFI_DBG("empty_space : %u\n", empty_space);
 
 	new_write_idx = q->qhdr_write_idx + size_in_words;
 	write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx);
@@ -110,15 +106,12 @@
 		memcpy(write_q, (uint8_t *)cmd_ptr + temp,
 			new_write_idx << BYTE_WORD_SHIFT);
 	}
-	for (i = 0; i < size_in_words; i++)
-		pr_debug("%x\n", write_ptr[i]);
 
 	q->qhdr_write_idx = new_write_idx;
-	HFI_DBG("q->qhdr_write_idx %x\n", q->qhdr_write_idx);
 	cam_io_w((uint32_t)INTR_ENABLE,
 		g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
 err:
-	mutex_unlock(&g_hfi->cmd_q_lock);
+	mutex_unlock(&hfi_cmd_q_mutex);
 	return rc;
 }
 
@@ -129,32 +122,40 @@
 	uint32_t new_read_idx, size_in_words, temp;
 	uint32_t *read_q, *read_ptr;
 	int rc = 0;
-	int i = 0;
 
-	if (!pmsg || q_id > Q_DBG) {
-		pr_err("Inavlid args\n");
+	if (!pmsg) {
+		CAM_ERR(CAM_HFI, "Invalid msg");
 		return -EINVAL;
 	}
 
-	if (!g_hfi || (g_hfi->hfi_state != HFI_READY)) {
-		pr_err("HFI interface not ready yet\n");
-		return -EIO;
+	if (q_id > Q_DBG) {
+		CAM_ERR(CAM_HFI, "Invalid q: %u", q_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hfi_msg_q_mutex);
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "hfi not set up yet");
+		rc = -ENODEV;
+		goto err;
+	}
+
+	if ((g_hfi->hfi_state != HFI_READY) ||
+		!g_hfi->msg_q_state) {
+		CAM_ERR(CAM_HFI, "hfi state: %u, msg q state: %u",
+			g_hfi->hfi_state, g_hfi->msg_q_state);
+		rc = -ENODEV;
+		goto err;
 	}
 
 	q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
 	q = &q_tbl_ptr->q_hdr[q_id];
 
 	if (q->qhdr_read_idx == q->qhdr_write_idx) {
-		pr_debug("FW or Q not ready, hfi state : %u, r idx : %u, w idx : %u\n",
+		CAM_DBG(CAM_HFI, "Q not ready, state:%u, r idx:%u, w idx:%u",
 			g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
-		return -EIO;
-	}
-
-	mutex_lock(&g_hfi->msg_q_lock);
-	if (!g_hfi->msg_q_state) {
-		pr_err("HFI message interface not ready yet\n");
-		mutex_unlock(&g_hfi->msg_q_lock);
-		return -EIO;
+		rc = -EIO;
+		goto err;
 	}
 
 	if (q_id == Q_MSG)
@@ -165,12 +166,9 @@
 	read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
 	size_in_words = (*read_ptr) >> BYTE_WORD_SHIFT;
 
-	HFI_DBG("size_in_words : %u, read_ptr : %pK\n", size_in_words,
-		(void *)read_ptr);
-
 	if ((size_in_words == 0) ||
 		(size_in_words > ICP_HFI_MAX_MSG_SIZE_IN_WORDS)) {
-		pr_err("invalid HFI message packet size - 0x%08x\n",
+		CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
 			size_in_words << BYTE_WORD_SHIFT);
 		q->qhdr_read_idx = q->qhdr_write_idx;
 		rc = -EIO;
@@ -178,7 +176,6 @@
 	}
 
 	new_read_idx = q->qhdr_read_idx + size_in_words;
-	HFI_DBG("new_read_idx : %u\n", new_read_idx);
 
 	if (new_read_idx < q->qhdr_q_size) {
 		memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT);
@@ -190,12 +187,9 @@
 			new_read_idx << BYTE_WORD_SHIFT);
 	}
 
-	for (i = 0; i < size_in_words; i++)
-		HFI_DBG("%x\n", read_ptr[i]);
-
 	q->qhdr_read_idx = new_read_idx;
 err:
-	mutex_unlock(&g_hfi->msg_q_lock);
+	mutex_unlock(&hfi_msg_q_mutex);
 	return rc;
 }
 
@@ -265,7 +259,7 @@
 	case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT:
 		break;
 	default:
-		pr_err("command not supported :%d\n", type);
+		CAM_ERR(CAM_HFI, "command not supported :%d", type);
 		break;
 	}
 }
@@ -277,7 +271,7 @@
 	struct cam_icp_query_cap_cmd *query_cmd = NULL;
 
 	if (!query_buf) {
-		pr_err("%s: query buf is NULL\n", __func__);
+		CAM_ERR(CAM_HFI, "query buf is NULL");
 		return -EINVAL;
 	}
 
@@ -332,6 +326,9 @@
 	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
 	uint32_t hw_version, fw_version, status = 0;
 
+	mutex_lock(&hfi_cmd_q_mutex);
+	mutex_lock(&hfi_msg_q_mutex);
+
 	if (!g_hfi) {
 		g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
 		if (!g_hfi) {
@@ -340,13 +337,13 @@
 		}
 	}
 
-	HFI_DBG("g_hfi: %pK\n", (void *)g_hfi);
 	if (g_hfi->hfi_state != HFI_DEINIT) {
-		pr_err("hfi_init: invalid state\n");
+		CAM_ERR(CAM_HFI, "hfi_init: invalid state");
 		return -EINVAL;
 	}
 
 	memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
+	g_hfi->hfi_state = HFI_DEINIT;
 
 	if (debug) {
 		cam_io_w_mb(
@@ -373,7 +370,6 @@
 	qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS;
 
 	/* setup host-to-firmware command queue */
-	pr_debug("updating the command queue info\n");
 	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
 	cmd_q_hdr->qhdr_status = QHDR_ACTIVE;
 	cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova;
@@ -384,7 +380,6 @@
 	cmd_q_hdr->qhdr_write_idx = RESET;
 
 	/* setup firmware-to-Host message queue */
-	pr_debug("updating the message queue info\n");
 	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
 	msg_q_hdr->qhdr_status = QHDR_ACTIVE;
 	msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova;
@@ -395,7 +390,6 @@
 	msg_q_hdr->qhdr_write_idx = RESET;
 
 	/* setup firmware-to-Host message queue */
-	pr_debug("updating the debug queue info\n");
 	dbg_q_hdr = &qtbl->q_hdr[Q_DBG];
 	dbg_q_hdr->qhdr_status = QHDR_ACTIVE;
 	dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova;
@@ -404,7 +398,6 @@
 	dbg_q_hdr->qhdr_pkt_drop_cnt = RESET;
 	dbg_q_hdr->qhdr_read_idx = RESET;
 	dbg_q_hdr->qhdr_write_idx = RESET;
-	pr_debug("Done updating the debug queue info\n");
 
 	switch (event_driven_mode) {
 	case INTR_MODE:
@@ -473,7 +466,8 @@
 		break;
 
 	default:
-		pr_err("Invalid event driven mode :%u", event_driven_mode);
+		CAM_ERR(CAM_HFI, "Invalid event driven mode :%u",
+			event_driven_mode);
 		break;
 	}
 
@@ -490,56 +484,58 @@
 		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
 
 	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
-	HFI_DBG("hw version : [%x]\n", hw_version);
 
 	rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
 		status, status != ICP_INIT_RESP_SUCCESS, 15, 200);
 	if (rc) {
-		pr_err("timed out , status = %u\n", status);
+		CAM_ERR(CAM_HFI, "timed out , status = %u", status);
 		goto regions_fail;
 	}
 
 	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
-	HFI_DBG("fw version : %u[%x]\n", fw_version, fw_version);
+	CAM_DBG(CAM_HFI, "hw version : [%x], fw version : [%x]",
+		hw_version, fw_version);
 
 	g_hfi->csr_base = icp_base;
 	g_hfi->hfi_state = HFI_READY;
 	g_hfi->cmd_q_state = true;
 	g_hfi->msg_q_state = true;
-	mutex_init(&g_hfi->cmd_q_lock);
-	mutex_init(&g_hfi->msg_q_lock);
 	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
 
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+
 	return rc;
 regions_fail:
 	kfree(g_hfi);
 alloc_fail:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
 	return rc;
 }
 
 
 void cam_hfi_deinit(void)
 {
+	mutex_lock(&hfi_cmd_q_mutex);
+	mutex_lock(&hfi_msg_q_mutex);
+
 	if (!g_hfi) {
-		pr_err("hfi path not established yet\n");
-		return;
+		CAM_ERR(CAM_HFI, "hfi path not established yet");
+		goto err;
 	}
+
+	g_hfi->cmd_q_state = false;
+	g_hfi->msg_q_state = false;
+
 	cam_io_w((uint32_t)INTR_DISABLE,
 		g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
-
-	mutex_lock(&g_hfi->cmd_q_lock);
-	g_hfi->cmd_q_state = false;
-	mutex_unlock(&g_hfi->cmd_q_lock);
-
-	mutex_lock(&g_hfi->msg_q_lock);
-	g_hfi->msg_q_state = false;
-	mutex_unlock(&g_hfi->msg_q_lock);
-
-	mutex_destroy(&g_hfi->cmd_q_lock);
-	mutex_destroy(&g_hfi->msg_q_lock);
-
-	kfree(g_hfi);
+	kzfree(g_hfi);
 	g_hfi = NULL;
+
+err:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
 }
 
 void icp_enable_fw_debug(void)
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 9f6f940..e200f6f 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "A5-CORE %s:%d " fmt, __func__, __LINE__
-
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/debugfs.h>
@@ -35,6 +33,7 @@
 #include "hfi_sys_defs.h"
 #include "cam_icp_hw_mgr_intf.h"
 #include "cam_cpas_api.h"
+#include "cam_debug_util.h"
 
 static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
 	struct cam_icp_cpas_vote *cpas_vote)
@@ -50,7 +49,7 @@
 			&cpas_vote->axi_vote);
 
 	if (rc)
-		pr_err("cpas vote is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
 
 	return rc;
 }
@@ -60,26 +59,26 @@
 	struct elf32_hdr *elf_hdr;
 
 	if (!elf) {
-		pr_err("Invalid params\n");
+		CAM_ERR(CAM_ICP, "Invalid params");
 		return -EINVAL;
 	}
 
 	elf_hdr = (struct elf32_hdr *)elf;
 
 	if (memcmp(elf_hdr->e_ident, ELFMAG, SELFMAG)) {
-		pr_err("ICP elf identifier is failed\n");
+		CAM_ERR(CAM_ICP, "ICP elf identifier is failed");
 		return -EINVAL;
 	}
 
 	/* check architecture */
 	if (elf_hdr->e_machine != EM_ARM) {
-		pr_err("unsupported arch\n");
+		CAM_ERR(CAM_ICP, "unsupported arch");
 		return -EINVAL;
 	}
 
 	/* check elf bit format */
 	if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
-		pr_err("elf doesn't support 32 bit format\n");
+		CAM_ERR(CAM_ICP, "elf doesn't support 32 bit format");
 		return -EINVAL;
 	}
 
@@ -97,7 +96,7 @@
 	struct elf32_phdr *prg_hdr;
 
 	if (!elf || !fw_size) {
-		pr_err("invalid args\n");
+		CAM_ERR(CAM_ICP, "invalid args");
 		return -EINVAL;
 	}
 
@@ -109,11 +108,11 @@
 	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
 
 	if (!prg_hdr) {
-		pr_err("failed to get elf program header attr\n");
+		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
 		return -EINVAL;
 	}
 
-	pr_debug("num_prg_hdrs = %d\n", num_prg_hdrs);
+	CAM_DBG(CAM_ICP, "num_prg_hdrs = %d", num_prg_hdrs);
 	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
 		if (prg_hdr->p_flags == 0)
 			continue;
@@ -121,7 +120,7 @@
 		seg_mem_size = (prg_hdr->p_memsz + prg_hdr->p_align - 1) &
 					~(prg_hdr->p_align - 1);
 		seg_mem_size += prg_hdr->p_vaddr;
-		pr_debug("p_memsz = %x p_align = %x p_vaddr = %x seg_mem_size = %x\n",
+		CAM_DBG(CAM_ICP, "memsz:%x align:%x addr:%x seg_mem_size:%x",
 			(int)prg_hdr->p_memsz, (int)prg_hdr->p_align,
 			(int)prg_hdr->p_vaddr, (int)seg_mem_size);
 		if (*fw_size < seg_mem_size)
@@ -130,7 +129,7 @@
 	}
 
 	if (*fw_size == 0) {
-		pr_err("invalid elf fw file\n");
+		CAM_ERR(CAM_ICP, "invalid elf fw file");
 		return -EINVAL;
 	}
 
@@ -155,7 +154,7 @@
 	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
 
 	if (!prg_hdr) {
-		pr_err("failed to get elf program header attr\n");
+		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
 		return -EINVAL;
 	}
 
@@ -163,15 +162,14 @@
 		if (prg_hdr->p_flags == 0)
 			continue;
 
-		pr_debug("Loading FW header size: %u\n", prg_hdr->p_filesz);
+		CAM_DBG(CAM_ICP, "Loading FW header size: %u",
+			prg_hdr->p_filesz);
 		if (prg_hdr->p_filesz != 0) {
 			src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
 			dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
 						prg_hdr->p_vaddr);
 
 			memcpy_toio(dest, src, prg_hdr->p_filesz);
-			pr_debug("fw kva: %pK, p_vaddr: 0x%x\n",
-					dest, prg_hdr->p_vaddr);
 		}
 	}
 
@@ -191,7 +189,7 @@
 	struct a5_soc_info *cam_a5_soc_info = NULL;
 
 	if (!device_priv) {
-		pr_err("Invalid cam_dev_info\n");
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
 		return -EINVAL;
 	}
 
@@ -202,44 +200,38 @@
 	cam_a5_soc_info = soc_info->soc_private;
 
 	rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf", &pdev->dev);
-	pr_debug("request_firmware: %d\n", rc);
-	if (rc < 0) {
-		pr_err("Failed to locate fw\n");
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
 		return rc;
 	}
 
 	if (!core_info->fw_elf) {
-		pr_err("request_firmware is failed\n");
+		CAM_ERR(CAM_ICP, "Invalid elf size");
 		return -EINVAL;
 	}
 
 	fw_start = core_info->fw_elf->data;
 	rc = cam_icp_validate_fw(fw_start);
-	if (rc < 0) {
-		pr_err("fw elf validation failed\n");
+	if (rc) {
+		CAM_ERR(CAM_ICP, "fw elf validation failed");
 		return -EINVAL;
 	}
 
 	rc = cam_icp_get_fw_size(fw_start, &fw_size);
-	if (rc < 0) {
-		pr_err("unable to get fw file size\n");
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to get fw size");
 		return rc;
 	}
-	pr_debug("cam_icp_get_fw_size: %u\n", fw_size);
-
-	/* Check FW firmware memory allocation is OK or not */
-	pr_debug("cam_icp_get_fw_size: %u %llu\n",
-		fw_size, core_info->fw_buf_len);
 
 	if (core_info->fw_buf_len < fw_size) {
-		pr_err("fw allocation failed\n");
+		CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu",
+			fw_size, core_info->fw_buf_len);
 		goto fw_alloc_failed;
 	}
 
-	/* download fw */
 	rc = cam_icp_program_fw(fw_start, core_info);
-	if (rc < 0) {
-		pr_err("fw program is failed\n");
+	if (rc) {
+		CAM_ERR(CAM_ICP, "fw program is failed");
 		goto fw_program_failed;
 	}
 
@@ -259,7 +251,7 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid cam_dev_info\n");
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
 		return -EINVAL;
 	}
 
@@ -267,7 +259,8 @@
 	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
 
 	if ((!soc_info) || (!core_info)) {
-		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		CAM_ERR(CAM_ICP, "soc_info: %pK core_info: %pK",
+			soc_info, core_info);
 		return -EINVAL;
 	}
 
@@ -279,16 +272,16 @@
 	rc = cam_cpas_start(core_info->cpas_handle,
 		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
 	if (rc) {
-		pr_err("cpass start failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas start failed: %d", rc);
 		return rc;
 	}
 	core_info->cpas_start = true;
 
 	rc = cam_a5_enable_soc_resources(soc_info);
 	if (rc) {
-		pr_err("soc enable is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "soc enable failed: %d", rc);
 		if (cam_cpas_stop(core_info->cpas_handle))
-			pr_err("cpas stop is failed\n");
+			CAM_ERR(CAM_ICP, "cpas stop failed");
 		else
 			core_info->cpas_start = false;
 	}
@@ -305,24 +298,25 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid cam_dev_info\n");
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
 		return -EINVAL;
 	}
 
 	soc_info = &a5_dev->soc_info;
 	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
 	if ((!soc_info) || (!core_info)) {
-		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
 		return -EINVAL;
 	}
 
 	rc = cam_a5_disable_soc_resources(soc_info);
 	if (rc)
-		pr_err("soc disable is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "soc disable failed: %d", rc);
 
 	if (core_info->cpas_start) {
 		if (cam_cpas_stop(core_info->cpas_handle))
-			pr_err("cpas stop is failed\n");
+			CAM_ERR(CAM_ICP, "cpas stop failed");
 		else
 			core_info->cpas_start = false;
 	}
@@ -339,7 +333,7 @@
 	uint32_t irq_status = 0;
 
 	if (!data) {
-		pr_err("Invalid cam_dev_info or query_cap args\n");
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info or query_cap args");
 		return IRQ_HANDLED;
 	}
 
@@ -354,18 +348,15 @@
 			soc_info->reg_map[A5_SIERRA_BASE].mem_base +
 			core_info->a5_hw_info->a5_host_int_clr);
 
-	pr_debug("irq_status = %x\n", irq_status);
-	if (irq_status & A5_HOST_INT)
-		pr_debug("A5 to Host interrupt, read msg Q\n");
-
 	if ((irq_status & A5_WDT_0) ||
 		(irq_status & A5_WDT_1)) {
-		pr_err_ratelimited("watch dog interrupt from A5\n");
+		CAM_ERR_RATE_LIMIT(CAM_ICP, "watchdog interrupt from A5");
 	}
 
 	if (core_info->irq_cb.icp_hw_mgr_cb)
 		core_info->irq_cb.icp_hw_mgr_cb(irq_status,
 					core_info->irq_cb.data);
+
 	return IRQ_HANDLED;
 }
 
@@ -379,12 +370,12 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_ICP, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	if (cmd_type >= CAM_ICP_A5_CMD_MAX) {
-		pr_err("Invalid command : %x\n", cmd_type);
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
 		return -EINVAL;
 	}
 
@@ -401,7 +392,7 @@
 		struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;
 
 		if (!cmd_args) {
-			pr_err("cmd args NULL\n");
+			CAM_ERR(CAM_ICP, "cmd args NULL");
 			return -EINVAL;
 		}
 
@@ -409,15 +400,16 @@
 		core_info->fw_kva_addr = fw_buf_info->kva;
 		core_info->fw_buf_len = fw_buf_info->len;
 
-		pr_debug("fw buf info = %x %llx %lld\n", core_info->fw_buf,
-			core_info->fw_kva_addr, core_info->fw_buf_len);
+		CAM_DBG(CAM_ICP, "fw buf info = %x %llx %lld",
+			core_info->fw_buf, core_info->fw_kva_addr,
+			core_info->fw_buf_len);
 		break;
 	}
 	case CAM_ICP_A5_SET_IRQ_CB: {
 		struct cam_icp_a5_set_irq_cb *irq_cb = cmd_args;
 
 		if (!cmd_args) {
-			pr_err("cmd args NULL\n");
+			CAM_ERR(CAM_ICP, "cmd args NULL");
 			return -EINVAL;
 		}
 
@@ -433,7 +425,7 @@
 		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
 
 		if (!cmd_args) {
-			pr_err("cmd args NULL\n");
+			CAM_ERR(CAM_ICP, "cmd args NULL");
 			return -EINVAL;
 		}
 
@@ -445,7 +437,7 @@
 		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
 
 		if (!cmd_args) {
-			pr_err("cmd args NULL\n");
+			CAM_ERR(CAM_ICP, "cmd args NULL");
 			return -EINVAL;
 		}
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
index f649c3b..08b934e 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
@@ -23,6 +23,7 @@
 #include "cam_a5_hw_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
 #include "cam_cpas_api.h"
+#include "cam_debug_util.h"
 
 struct a5_soc_info cam_a5_soc_info;
 EXPORT_SYMBOL(cam_a5_soc_info);
@@ -64,7 +65,7 @@
 
 	rc = cam_cpas_register_client(&cpas_register_params);
 	if (rc < 0) {
-		pr_err("cam_cpas_register_client is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas register failed: %d", rc);
 		return rc;
 	}
 
@@ -101,7 +102,7 @@
 	a5_dev_intf->hw_ops.process_cmd = cam_a5_process_cmd;
 	a5_dev_intf->hw_type = CAM_ICP_DEV_A5;
 
-	pr_debug("%s: type %d index %d\n", __func__,
+	CAM_DBG(CAM_ICP, "type %d index %d",
 		a5_dev_intf->hw_type,
 		a5_dev_intf->hw_idx);
 
@@ -118,9 +119,9 @@
 	match_dev = of_match_device(pdev->dev.driver->of_match_table,
 		&pdev->dev);
 	if (!match_dev) {
-		pr_err("%s: No a5 hardware info\n", __func__);
+		CAM_ERR(CAM_ICP, "No a5 hardware info");
 		rc = -EINVAL;
-		goto pr_err;
+		goto match_err;
 	}
 	hw_info = (struct cam_a5_device_hw_info *)match_dev->data;
 	core_info->a5_hw_info = hw_info;
@@ -130,16 +131,16 @@
 	rc = cam_a5_init_soc_resources(&a5_dev->soc_info, cam_a5_irq,
 		a5_dev);
 	if (rc < 0) {
-		pr_err("%s: failed to init_soc\n", __func__);
+		CAM_ERR(CAM_ICP, "failed to init_soc");
 		goto init_soc_failure;
 	}
 
-	pr_debug("cam_a5_init_soc_resources : %pK\n",
+	CAM_DBG(CAM_ICP, "soc info : %pK",
 				(void *)&a5_dev->soc_info);
 	rc = cam_a5_register_cpas(&a5_dev->soc_info,
 			core_info, a5_dev_intf->hw_idx);
 	if (rc < 0) {
-		pr_err("a5 cpas registration failed\n");
+		CAM_ERR(CAM_ICP, "a5 cpas registration failed");
 		goto cpas_reg_failed;
 	}
 	a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
@@ -147,13 +148,13 @@
 	spin_lock_init(&a5_dev->hw_lock);
 	init_completion(&a5_dev->hw_complete);
 
-	pr_debug("%s: A5%d probe successful\n", __func__,
+	CAM_DBG(CAM_ICP, "A5%d probe successful",
 		a5_dev_intf->hw_idx);
 	return 0;
 
 cpas_reg_failed:
 init_soc_failure:
-pr_err:
+match_err:
 	kfree(a5_dev->core_info);
 core_info_alloc_failure:
 	kfree(a5_dev);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
index a98f01f..f252931 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -18,9 +18,7 @@
 #include <media/cam_icp.h>
 #include "a5_soc.h"
 #include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
 {
@@ -35,7 +33,7 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("%s: get a5 dt prop is failed\n", __func__);
+		CAM_ERR(CAM_ICP, "get a5 dt prop failed");
 		return rc;
 	}
 
@@ -44,7 +42,7 @@
 
 	rc = of_property_read_string(of_node, "fw_name", &fw_name);
 	if (rc < 0)
-		pr_err("%s: fw_name read failed\n", __func__);
+		CAM_ERR(CAM_ICP, "fw_name read failed");
 
 	return rc;
 }
@@ -85,7 +83,7 @@
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_TURBO_VOTE, true);
 	if (rc)
-		pr_err("%s: enable platform failed\n", __func__);
+		CAM_ERR(CAM_ICP, "enable platform failed");
 
 	return rc;
 }
@@ -96,7 +94,7 @@
 
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc)
-		pr_err("%s: disable platform failed\n", __func__);
+		CAM_ERR(CAM_ICP, "disable platform failed");
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index cabdc8a..557eaf1 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "BPS-CORE %s:%d " fmt, __func__, __LINE__
-
 #include <linux/of.h>
 #include <linux/debugfs.h>
 #include <linux/videodev2.h>
@@ -31,6 +29,7 @@
 #include "cam_icp_hw_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
 #include "cam_cpas_api.h"
+#include "cam_debug_util.h"
 
 static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info,
 			struct cam_icp_cpas_vote *cpas_vote)
@@ -45,7 +44,7 @@
 				&cpas_vote->axi_vote);
 
 	if (rc < 0)
-		pr_err("cpas vote is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas vote failed: %d", rc);
 
 	return rc;
 }
@@ -61,7 +60,7 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid cam_dev_info\n");
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
 		return -EINVAL;
 	}
 
@@ -69,7 +68,8 @@
 	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
 
 	if ((!soc_info) || (!core_info)) {
-		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
 		return -EINVAL;
 	}
 
@@ -81,16 +81,16 @@
 	rc = cam_cpas_start(core_info->cpas_handle,
 			&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
 	if (rc) {
-		pr_err("cpass start failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas start failed: %d", rc);
 		return rc;
 	}
 	core_info->cpas_start = true;
 
 	rc = cam_bps_enable_soc_resources(soc_info);
 	if (rc) {
-		pr_err("soc enable is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "soc enable failed: %d", rc);
 		if (cam_cpas_stop(core_info->cpas_handle))
-			pr_err("cpas stop is failed\n");
+			CAM_ERR(CAM_ICP, "cpas stop failed");
 		else
 			core_info->cpas_start = false;
 	}
@@ -107,24 +107,25 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid cam_dev_info\n");
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
 		return -EINVAL;
 	}
 
 	soc_info = &bps_dev->soc_info;
 	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
 	if ((!soc_info) || (!core_info)) {
-		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
 		return -EINVAL;
 	}
 
 	rc = cam_bps_disable_soc_resources(soc_info);
 	if (rc)
-		pr_err("soc disable is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
 
 	if (core_info->cpas_start) {
 		if (cam_cpas_stop(core_info->cpas_handle))
-			pr_err("cpas stop is failed\n");
+			CAM_ERR(CAM_ICP, "cpas stop failed");
 		else
 			core_info->cpas_start = false;
 	}
@@ -142,12 +143,12 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_ICP, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	if (cmd_type >= CAM_ICP_BPS_CMD_MAX) {
-		pr_err("Invalid command : %x\n", cmd_type);
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
 		return -EINVAL;
 	}
 
@@ -160,7 +161,7 @@
 		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
 
 		if (!cmd_args) {
-			pr_err("cmd args NULL\n");
+			CAM_ERR(CAM_ICP, "cmd args NULL");
 			return -EINVAL;
 		}
 
@@ -172,7 +173,7 @@
 		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
 
 		if (!cmd_args) {
-			pr_err("cmd args NULL\n");
+			CAM_ERR(CAM_ICP, "cmd args NULL");
 			return -EINVAL;
 		}
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
index c3477ee..ddff677 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
@@ -23,9 +23,7 @@
 #include "cam_icp_hw_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
 #include "cam_cpas_api.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 struct cam_bps_device_hw_info cam_bps_hw_info = {
 	.reserved = 0,
@@ -47,7 +45,7 @@
 
 	rc = cam_cpas_register_client(&cpas_register_params);
 	if (rc < 0) {
-		pr_err("cam_cpas_register_client is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas register failed: %d", rc);
 		return rc;
 	}
 	core_info->cpas_handle = cpas_register_params.client_handle;
@@ -95,7 +93,7 @@
 	match_dev = of_match_device(pdev->dev.driver->of_match_table,
 		&pdev->dev);
 	if (!match_dev) {
-		pr_err("%s: No bps hardware info\n", __func__);
+		CAM_ERR(CAM_ICP, "No bps hardware info");
 		kfree(bps_dev->core_info);
 		kfree(bps_dev);
 		kfree(bps_dev_intf);
@@ -108,13 +106,13 @@
 	rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq,
 		bps_dev);
 	if (rc < 0) {
-		pr_err("%s: failed to init_soc\n", __func__);
+		CAM_ERR(CAM_ICP, "failed to init_soc");
 		kfree(bps_dev->core_info);
 		kfree(bps_dev);
 		kfree(bps_dev_intf);
 		return rc;
 	}
-	pr_debug("cam_bps_init_soc_resources : %pK\n",
+	CAM_DBG(CAM_ICP, "soc info : %pK",
 		(void *)&bps_dev->soc_info);
 
 	rc = cam_bps_register_cpas(&bps_dev->soc_info,
@@ -129,7 +127,7 @@
 	mutex_init(&bps_dev->hw_mutex);
 	spin_lock_init(&bps_dev->hw_lock);
 	init_completion(&bps_dev->hw_complete);
-	pr_debug("%s: BPS%d probe successful\n", __func__,
+	CAM_DBG(CAM_ICP, "BPS%d probe successful",
 		bps_dev_intf->hw_idx);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
index 8a3c7ac..54e898c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -18,9 +18,7 @@
 #include <media/cam_icp.h>
 #include "bps_soc.h"
 #include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info)
 {
@@ -28,7 +26,7 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0)
-		pr_err("get bps dt prop is failed\n");
+		CAM_ERR(CAM_ICP, "get bps dt prop failed");
 
 	return rc;
 }
@@ -69,7 +67,7 @@
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_TURBO_VOTE, false);
 	if (rc)
-		pr_err("%s: enable platform failed\n", __func__);
+		CAM_ERR(CAM_ICP, "enable platform failed");
 
 	return rc;
 }
@@ -80,7 +78,7 @@
 
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
 	if (rc)
-		pr_err("%s: disable platform failed\n", __func__);
+		CAM_ERR(CAM_ICP, "disable platform failed");
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index f273a7b..1b3afc0 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "ICP-HW-MGR %s:%d " fmt, __func__, __LINE__
-
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -46,9 +44,7 @@
 #include "cam_mem_mgr.h"
 #include "a5_core.h"
 #include "hfi_sys_defs.h"
-
-#undef  ICP_DBG
-#define ICP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 #define ICP_WORKQ_NUM_TASK 30
 #define ICP_WORKQ_TASK_CMD_TYPE 1
@@ -80,7 +76,7 @@
 	struct cam_icp_hw_mgr *hw_mgr;
 
 	if (!data || !priv) {
-		pr_err("Invalid params%pK %pK\n", data, priv);
+		CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
 		return -EINVAL;
 	}
 
@@ -88,7 +84,6 @@
 	task_data = (struct hfi_cmd_work_data *)data;
 
 	rc = hfi_write_cmd(task_data->data);
-	ICP_DBG("task type : %u, rc : %d\n", task_data->type, rc);
 
 	return rc;
 }
@@ -106,14 +101,15 @@
 
 	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
 	if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
-		pr_err("failed with error : %u\n", ioconfig_ack->err_type);
+		CAM_ERR(CAM_ICP, "failed with error : %u",
+			ioconfig_ack->err_type);
 		return -EIO;
 	}
 
 	frame_done =
 		(struct hfi_msg_frame_process_done *)ioconfig_ack->msg_data;
 	if (frame_done->result) {
-		pr_err("result : %u\n", frame_done->result);
+		CAM_ERR(CAM_ICP, "result : %u", frame_done->result);
 		return -EIO;
 	}
 
@@ -126,7 +122,7 @@
 			break;
 
 	if (i >= CAM_FRAME_CMD_MAX) {
-		pr_err("unable to find pkt in ctx data for req_id =%lld\n",
+		CAM_ERR(CAM_ICP, "pkt not found in ctx data for req_id =%lld",
 			request_id);
 		return -EINVAL;
 	}
@@ -136,7 +132,7 @@
 	ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
 
 	/* now release memory for hfi frame process command */
-	ICP_DBG("matching request id: %lld\n",
+	CAM_DBG(CAM_ICP, "matching request id: %lld",
 			hfi_frame_process->request_id[idx]);
 	mutex_lock(&ctx_data->hfi_frame_process.lock);
 	hfi_frame_process->request_id[idx] = 0;
@@ -153,40 +149,37 @@
 	struct hfi_msg_bps_common *bps_config_ack = NULL;
 
 	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
-	ICP_DBG("opcode : %u\n", ioconfig_ack->opcode);
 
 	if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO) {
 		ipe_config_ack =
 			(struct hfi_msg_ipe_config *)(ioconfig_ack->msg_data);
 		if (ipe_config_ack->rc) {
-			pr_err("rc = %d err = %u\n",
+			CAM_ERR(CAM_ICP, "rc = %d err = %u",
 				ipe_config_ack->rc, ioconfig_ack->err_type);
 			return -EIO;
 		}
 		ctx_data =
 			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
 		if (!ctx_data) {
-			pr_err("wrong ctx data from IPE response\n");
+			CAM_ERR(CAM_ICP, "wrong ctx data from IPE response");
 			return -EINVAL;
 		}
 
 		mutex_lock(&ctx_data->ctx_mutex);
 		ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
 		mutex_unlock(&ctx_data->ctx_mutex);
-		ICP_DBG("scratch_mem_size = %u\n",
-			ipe_config_ack->scratch_mem_size);
 	} else {
 		bps_config_ack =
 			(struct hfi_msg_bps_common *)(ioconfig_ack->msg_data);
 		if (bps_config_ack->rc) {
-			pr_err("rc : %u, opcode :%u\n",
+			CAM_ERR(CAM_ICP, "rc : %u, opcode :%u",
 				bps_config_ack->rc, ioconfig_ack->opcode);
 			return -EIO;
 		}
 		ctx_data =
 			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
 		if (!ctx_data) {
-			pr_err("wrong ctx data from BPS response\n");
+			CAM_ERR(CAM_ICP, "wrong ctx data from BPS response");
 			return -EINVAL;
 		}
 	}
@@ -202,22 +195,20 @@
 
 	create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
 	if (!create_handle_ack) {
-		pr_err("Invalid create_handle_ack\n");
+		CAM_ERR(CAM_ICP, "Invalid create_handle_ack");
 		return -EINVAL;
 	}
 
-	ICP_DBG("err type : %u\n", create_handle_ack->err_type);
-
 	ctx_data = (struct cam_icp_hw_ctx_data *)create_handle_ack->user_data1;
 	if (!ctx_data) {
-		pr_err("Invalid ctx_data\n");
+		CAM_ERR(CAM_ICP, "Invalid ctx_data");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ctx_data->ctx_mutex);
 	ctx_data->fw_handle = create_handle_ack->fw_handle;
 	mutex_unlock(&ctx_data->ctx_mutex);
-	ICP_DBG("fw_handle = %x\n", ctx_data->fw_handle);
+	CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
 	complete(&ctx_data->wait_complete);
 
 	return 0;
@@ -230,18 +221,16 @@
 
 	ping_ack = (struct hfi_msg_ping_ack *)msg_ptr;
 	if (!ping_ack) {
-		pr_err("Empty ping ack message\n");
+		CAM_ERR(CAM_ICP, "Empty ping ack message");
 		return -EINVAL;
 	}
 
 	ctx_data = (struct cam_icp_hw_ctx_data *)ping_ack->user_data;
 	if (!ctx_data) {
-		pr_err("Invalid ctx_data\n");
+		CAM_ERR(CAM_ICP, "Invalid ctx_data");
 		return -EINVAL;
 	}
 
-	ICP_DBG("%x %x %pK\n", ping_ack->size, ping_ack->pkt_type,
-		(void *)ping_ack->user_data);
 	complete(&ctx_data->wait_complete);
 
 	return 0;
@@ -254,7 +243,7 @@
 	switch (msg_ptr[ICP_PACKET_OPCODE]) {
 	case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
 	case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
-		ICP_DBG("received HFI_IPEBPS_CMD_OPCODE_IPE/BPS_CONFIG_IO:\n");
+		CAM_DBG(CAM_ICP, "received IPE/BPS_CONFIG_IO:");
 		rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
 		if (rc)
 			return rc;
@@ -262,13 +251,12 @@
 
 	case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
 	case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
-		ICP_DBG("received OPCODE_IPE/BPS_FRAME_PROCESS:\n");
 		rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
 		if (rc)
 			return rc;
 		break;
 	default:
-		pr_err("Invalid opcode : %u\n",
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
 			msg_ptr[ICP_PACKET_OPCODE]);
 		break;
 	}
@@ -287,14 +275,14 @@
 	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
 	case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
 	case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
-		ICP_DBG("received IPE/BPS_DESTROY/ABORT:\n");
+		CAM_DBG(CAM_ICP, "received IPE/BPS_DESTROY/ABORT:");
 		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
 		ctx_data =
 			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
 		complete(&ctx_data->wait_complete);
 		break;
 	default:
-		pr_err("Invalid opcode : %u\n",
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
 			msg_ptr[ICP_PACKET_OPCODE]);
 		rc = -EINVAL;
 		break;
@@ -312,37 +300,35 @@
 	int read_len;
 
 	if (!data || !priv) {
-		pr_err("Invalid data\n");
+		CAM_ERR(CAM_ICP, "Invalid data");
 		return -EINVAL;
 	}
 
 	task_data = data;
 	hw_mgr = priv;
-	ICP_DBG("irq status : %u\n", task_data->irq_status);
 
 	read_len = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG);
 	if (read_len < 0) {
-		ICP_DBG("Unable to read msg q\n");
+		CAM_DBG(CAM_ICP, "Unable to read msg q");
 		return read_len;
 	}
 
 	msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
-	ICP_DBG("packet type: %x\n", msg_ptr[ICP_PACKET_TYPE]);
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	switch (msg_ptr[ICP_PACKET_TYPE]) {
 	case HFI_MSG_SYS_INIT_DONE:
-		ICP_DBG("received HFI_MSG_SYS_INIT_DONE\n");
+		CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
 		complete(&hw_mgr->a5_complete);
 		break;
 
 	case HFI_MSG_SYS_PING_ACK:
-		ICP_DBG("received HFI_MSG_SYS_PING_ACK\n");
+		CAM_DBG(CAM_ICP, "received SYS_PING_ACK");
 		rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
 		break;
 
 	case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
-		ICP_DBG("received HFI_MSG_IPEBPS_CREATE_HANDLE_ACK\n");
+		CAM_DBG(CAM_ICP, "received IPEBPS_CREATE_HANDLE_ACK");
 		rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
 		break;
 
@@ -355,11 +341,12 @@
 		break;
 
 	case HFI_MSG_EVENT_NOTIFY:
-		ICP_DBG("received HFI_MSG_EVENT_NOTIFY\n");
+		CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
 		break;
 
 	default:
-		pr_err("invalid msg : %u\n", msg_ptr[ICP_PACKET_TYPE]);
+		CAM_ERR(CAM_ICP, "invalid msg : %u",
+			msg_ptr[ICP_PACKET_TYPE]);
 		break;
 	}
 
@@ -379,7 +366,7 @@
 	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
 	task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
 	if (!task) {
-		pr_err("no empty task\n");
+		CAM_ERR(CAM_ICP, "no empty task");
 		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
 		return -ENOMEM;
 	}
@@ -424,12 +411,8 @@
 		return rc;
 
 	*qtbl = out;
-	ICP_DBG("kva = %llX\n", out.kva);
-	ICP_DBG("qtbl IOVA = %X\n", out.iova);
-	ICP_DBG("SMMU HDL = %X\n", out.smmu_hdl);
-	ICP_DBG("MEM HDL = %X\n", out.mem_handle);
-	ICP_DBG("length = %lld\n", out.len);
-	ICP_DBG("region = %d\n", out.region);
+	CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+		out.kva, out.iova, out.mem_handle, out.len);
 
 	return rc;
 }
@@ -451,9 +434,8 @@
 	icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
 	icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
 
-	ICP_DBG("kva = %llX\n", kvaddr);
-	ICP_DBG("IOVA = %llX\n", iova);
-	ICP_DBG("length = %zu\n", len);
+	CAM_DBG(CAM_ICP, "kva: %llX, iova: %llx, len: %zu",
+		kvaddr, iova, len);
 
 	return rc;
 }
@@ -466,43 +448,43 @@
 		CAM_SMMU_REGION_SHARED,
 		&icp_hw_mgr.hfi_mem.shmem);
 	if (rc) {
-		pr_err("Unable to get shared memory info\n");
+		CAM_ERR(CAM_ICP, "Unable to get shared memory info");
 		return rc;
 	}
 
 	rc = cam_icp_allocate_fw_mem();
 	if (rc) {
-		pr_err("Unable to allocate FW memory\n");
+		CAM_ERR(CAM_ICP, "Unable to allocate FW memory");
 		return rc;
 	}
 
 	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
 	if (rc) {
-		pr_err("Unable to allocate qtbl memory\n");
+		CAM_ERR(CAM_ICP, "Unable to allocate qtbl memory");
 		goto qtbl_alloc_failed;
 	}
 
 	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
 	if (rc) {
-		pr_err("Unable to allocate cmd q memory\n");
+		CAM_ERR(CAM_ICP, "Unable to allocate cmd q memory");
 		goto cmd_q_alloc_failed;
 	}
 
 	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
 	if (rc) {
-		pr_err("Unable to allocate msg q memory\n");
+		CAM_ERR(CAM_ICP, "Unable to allocate msg q memory");
 		goto msg_q_alloc_failed;
 	}
 
 	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
 	if (rc) {
-		pr_err("Unable to allocate dbg q memory\n");
+		CAM_ERR(CAM_ICP, "Unable to allocate dbg q memory");
 		goto dbg_q_alloc_failed;
 	}
 
 	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.sec_heap);
 	if (rc) {
-		pr_err("Unable to allocate sec heap q memory\n");
+		CAM_ERR(CAM_ICP, "Unable to allocate sec heap q memory");
 		goto sec_heap_alloc_failed;
 	}
 
@@ -586,13 +568,13 @@
 	if (rc)
 		return rc;
 
-	ICP_DBG("fw_handle = %x ctx_data = %pK\n",
+	CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
 		ctx_data->fw_handle, ctx_data);
 	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		ICP_DBG("FW timeout/err in abort handle command\n");
+		CAM_DBG(CAM_ICP, "FW timeout/err in abort handle command");
 	}
 
 	return rc;
@@ -640,13 +622,13 @@
 	if (rc)
 		return rc;
 
-	ICP_DBG("fw_handle = %x ctx_data = %pK\n",
+	CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
 		ctx_data->fw_handle, ctx_data);
 	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		ICP_DBG("FW response timeout: %d\n", rc);
+		CAM_ERR(CAM_ICP, "FW response timeout: %d", rc);
 	}
 
 	return rc;
@@ -657,13 +639,12 @@
 	int i = 0;
 
 	if (ctx_id >= CAM_ICP_CTX_MAX) {
-		pr_err("ctx_id is wrong: %d\n", ctx_id);
+		CAM_ERR(CAM_ICP, "ctx_id is wrong: %d", ctx_id);
 		return -EINVAL;
 	}
 
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	if (!hw_mgr->ctx_data[ctx_id].in_use) {
-		ICP_DBG("ctx is not in use: %d\n", ctx_id);
 		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 		return 0;
 	}
@@ -704,7 +685,7 @@
 	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
 
 	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
-		pr_err("dev intfs are wrong, failed to close\n");
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
 		return;
 	}
 
@@ -725,14 +706,14 @@
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	if ((hw_mgr->fw_download ==  false) && (!hw_mgr->ctxt_cnt)) {
-		ICP_DBG("hw mgr is already closed\n");
+		CAM_DBG(CAM_ICP, "hw mgr is already closed");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return 0;
 	}
 
 	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
 	if (!a5_dev_intf) {
-		pr_err("a5_dev_intf is NULL\n");
+		CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return -EINVAL;
 	}
@@ -744,7 +725,7 @@
 		CAM_ICP_A5_SET_IRQ_CB,
 		&irq_cb, sizeof(irq_cb));
 	if (rc)
-		pr_err("deregister irq call back failed\n");
+		CAM_ERR(CAM_ICP, "deregister irq callback failed");
 
 	fw_buf_info.kva = 0;
 	fw_buf_info.iova = 0;
@@ -755,7 +736,7 @@
 		&fw_buf_info,
 		sizeof(fw_buf_info));
 	if (rc)
-		pr_err("nullify the fw buf failed\n");
+		CAM_ERR(CAM_ICP, "failed to nullify the fw buf");
 
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
@@ -784,7 +765,7 @@
 	bps_dev_intf = hw_mgr->devices[CAM_ICP_DEV_BPS][0];
 
 	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
-		pr_err("dev intfs are wrong\n");
+		CAM_ERR(CAM_ICP, "dev intfs are wrong");
 		return -EINVAL;
 	}
 
@@ -877,30 +858,18 @@
 	hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
 	hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
 	hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
-	ICP_DBG("kva = %llX\n", hfi_mem.qtbl.kva);
-	ICP_DBG("IOVA = %X\n", hfi_mem.qtbl.iova);
-	ICP_DBG("length = %lld\n", hfi_mem.qtbl.len);
 
 	hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
 	hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
 	hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
-	ICP_DBG("kva = %llX\n", hfi_mem.cmd_q.kva);
-	ICP_DBG("IOVA = %X\n", hfi_mem.cmd_q.iova);
-	ICP_DBG("length = %lld\n", hfi_mem.cmd_q.len);
 
 	hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
 	hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
 	hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
-	ICP_DBG("kva = %llX\n", hfi_mem.msg_q.kva);
-	ICP_DBG("IOVA = %X\n", hfi_mem.msg_q.iova);
-	ICP_DBG("length = %lld\n", hfi_mem.msg_q.len);
 
 	hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
 	hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
 	hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
-	ICP_DBG("kva = %llX\n", hfi_mem.dbg_q.kva);
-	ICP_DBG("IOVA = %X\n",  hfi_mem.dbg_q.iova);
-	ICP_DBG("length = %lld\n", hfi_mem.dbg_q.len);
 
 	hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
 	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
@@ -908,6 +877,7 @@
 
 	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
 	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
 	return cam_hfi_init(0, &hfi_mem,
 		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
 		hw_mgr->a5_debug);
@@ -922,7 +892,7 @@
 
 	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
 	reinit_completion(&hw_mgr->a5_complete);
-	ICP_DBG("Sending HFI init command\n");
+	CAM_DBG(CAM_ICP, "Sending HFI init command");
 	rc = a5_dev_intf->hw_ops.process_cmd(
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_SEND_INIT,
@@ -930,14 +900,13 @@
 	if (rc)
 		return rc;
 
-	ICP_DBG("Wait for INIT DONE Message\n");
 	rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
 		msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		ICP_DBG("FW response timed out %d\n", rc);
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
 	}
-	ICP_DBG("Done Waiting for INIT DONE Message\n");
+	CAM_DBG(CAM_ICP, "Done Waiting for INIT DONE Message");
 
 	return rc;
 }
@@ -950,13 +919,13 @@
 	int rc = 0;
 
 	if (!hw_mgr) {
-		pr_err("hw_mgr is NULL\n");
+		CAM_ERR(CAM_ICP, "hw_mgr is NULL");
 		return -EINVAL;
 	}
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	if (hw_mgr->fw_download) {
-		ICP_DBG("FW already downloaded\n");
+		CAM_DBG(CAM_ICP, "FW already downloaded");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return rc;
 	}
@@ -998,7 +967,7 @@
 		NULL, 0);
 	hw_mgr->fw_download = true;
 	hw_mgr->ctxt_cnt = 0;
-	ICP_DBG("FW download done successfully\n");
+	CAM_DBG(CAM_ICP, "FW download done successfully");
 	if (!download_fw_args)
 		cam_icp_mgr_hw_close(hw_mgr, NULL);
 	return rc;
@@ -1040,19 +1009,17 @@
 
 	request_id = *(uint64_t *)config_args->priv;
 	hw_update_entries = config_args->hw_update_entries;
-	ICP_DBG("req_id = %lld %pK\n", request_id, config_args->priv);
+	CAM_DBG(CAM_ICP, "req_id = %lld %pK", request_id, config_args->priv);
 
 	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
 	if (!task) {
-		pr_err("no empty task\n");
+		CAM_ERR(CAM_ICP, "no empty task");
 		return -ENOMEM;
 	}
 
 	task_data = (struct hfi_cmd_work_data *)task->payload;
 	task_data->data = (void *)hw_update_entries->addr;
 	hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
-	ICP_DBG("request from hfi_cmd :%llu, hfi_cmd: %pK\n",
-		hfi_cmd->user_data2, hfi_cmd);
 	task_data->request_id = request_id;
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
@@ -1070,19 +1037,20 @@
 	struct cam_icp_hw_ctx_data *ctx_data = NULL;
 
 	if (!hw_mgr || !config_args) {
-		pr_err("Invalid arguments %pK %pK\n", hw_mgr, config_args);
+		CAM_ERR(CAM_ICP, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
 		return -EINVAL;
 	}
 
 	if (!config_args->num_hw_update_entries) {
-		pr_err("No hw update enteries are available\n");
+		CAM_ERR(CAM_ICP, "No hw update entries are available");
 		return -EINVAL;
 	}
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	ctx_data = config_args->ctxt_to_hw_map;
 	if (!ctx_data->in_use) {
-		pr_err("ctx is not in use\n");
+		CAM_ERR(CAM_ICP, "ctx is not in use");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		rc = -EINVAL;
 		goto config_err;
@@ -1117,7 +1085,7 @@
 	hfi_cmd->user_data1 = (uint64_t)ctx_data;
 	hfi_cmd->user_data2 = request_id;
 
-	ICP_DBG("ctx_data : %pK, request_id :%lld cmd_buf %x\n",
+	CAM_DBG(CAM_ICP, "ctx_data : %pK, request_id :%lld cmd_buf %x",
 		(void *)ctx_data->context_priv, request_id,
 		fw_cmd_buf_iova_addr);
 
@@ -1126,32 +1094,18 @@
 
 static int cam_icp_mgr_pkt_validation(struct cam_packet *packet)
 {
-	ICP_DBG("packet header : opcode = %x size = %x",
-		packet->header.op_code,	packet->header.size);
-
-	ICP_DBG(" req_id = %x flags = %x\n",
-		(uint32_t)packet->header.request_id, packet->header.flags);
-
-	ICP_DBG("packet data : c_off = %x c_num = %x\n",
-		packet->cmd_buf_offset,	packet->num_cmd_buf);
-
-	ICP_DBG("io_off = %x io_num = %x p_off = %x p_num = %x %x %x\n",
-		packet->io_configs_offset, packet->num_io_configs,
-		packet->patch_offset, packet->num_patches,
-		packet->kmd_cmd_buf_index, packet->kmd_cmd_buf_offset);
-
 	if (((packet->header.op_code & 0xff) !=
 		CAM_ICP_OPCODE_IPE_UPDATE) &&
 		((packet->header.op_code & 0xff) !=
 		CAM_ICP_OPCODE_BPS_UPDATE)) {
-		pr_err("Invalid Opcode in pkt: %d\n",
+		CAM_ERR(CAM_ICP, "Invalid Opcode in pkt: %d",
 			packet->header.op_code & 0xff);
 		return -EINVAL;
 	}
 
 	if ((packet->num_cmd_buf > 1) || (!packet->num_patches) ||
 		(!packet->num_io_configs)) {
-		pr_err("wrong number of cmd/patch info: %u %u\n",
+		CAM_ERR(CAM_ICP, "wrong number of cmd/patch info: %u %u",
 			packet->num_cmd_buf, packet->num_patches);
 		return -EINVAL;
 	}
@@ -1170,18 +1124,15 @@
 
 	cmd_desc = (struct cam_cmd_buf_desc *)
 		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
-	ICP_DBG("packet = %pK cmd_desc = %pK size = %lu\n",
-		(void *)packet, (void *)cmd_desc,
-		sizeof(struct cam_cmd_buf_desc));
 
 	rc = cam_mem_get_io_buf(cmd_desc->mem_handle,
 		hw_mgr->iommu_hdl, &iova_addr, &fw_cmd_buf_len);
 	if (rc) {
-		pr_err("unable to get src buf info for cmd buf: %x\n",
+		CAM_ERR(CAM_ICP, "unable to get src buf info for cmd buf: %x",
 			hw_mgr->iommu_hdl);
 		return rc;
 	}
-	ICP_DBG("cmd_buf desc cpu and iova address: %pK %zu\n",
+	CAM_DBG(CAM_ICP, "cmd_buf desc cpu and iova address: %pK %zu",
 		(void *)iova_addr, fw_cmd_buf_len);
 
 	*fw_cmd_buf_iova_addr = iova_addr;
@@ -1190,65 +1141,32 @@
 	return rc;
 }
 
-static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+static void cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_icp_hw_ctx_data *ctx_data,
 	struct cam_packet *packet,
 	struct cam_hw_prepare_update_args *prepare_args)
 {
-	int        rc = 0, i, j;
-	int32_t    sync_in_obj[CAM_ICP_IPE_IMAGE_MAX];
-	int32_t    merged_sync_in_obj;
+	int i, j, k;
 	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
 
 	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
 				packet->io_configs_offset/4);
-	ICP_DBG("packet = %pK io_cfg_ptr = %pK size = %lu\n",
-		(void *)packet, (void *)io_cfg_ptr,
-		sizeof(struct cam_buf_io_cfg));
-
 	prepare_args->num_out_map_entries = 0;
-	for (i = 0, j = 0; i < packet->num_io_configs; i++) {
+	prepare_args->num_in_map_entries = 0;
+
+	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
 		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
-			ICP_DBG("direction is i : %d :%u\n",
-				i, io_cfg_ptr[i].direction);
-			ICP_DBG("fence is i : %d :%d\n",
-				i, io_cfg_ptr[i].fence);
-			continue;
+			prepare_args->in_map_entries[j++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_in_map_entries++;
+		} else {
+			prepare_args->out_map_entries[k++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_out_map_entries++;
 		}
-
-		prepare_args->out_map_entries[j++].sync_id =
-			io_cfg_ptr[i].fence;
-		prepare_args->num_out_map_entries++;
-		ICP_DBG(" out fence = %x index = %d\n", io_cfg_ptr[i].fence, i);
+		CAM_DBG(CAM_ICP, "dir[%d]: %u, fence: %u",
+			i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
 	}
-
-	for (i = 0, j = 0; i < packet->num_io_configs; i++) {
-		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
-			sync_in_obj[j++] = io_cfg_ptr[i].fence;
-			ICP_DBG(" in fence = %x index = %d\n",
-					io_cfg_ptr[i].fence, i);
-		}
-	}
-
-	if (j == 1) {
-		merged_sync_in_obj = sync_in_obj[j - 1];
-	} else if (j > 1) {
-		rc = cam_sync_merge(&sync_in_obj[0], j, &merged_sync_in_obj);
-		if (rc) {
-			pr_err("unable to create in merged object: %d\n", rc);
-			return rc;
-		}
-	} else {
-		pr_err("no input fence provided %u\n", j);
-		return -EINVAL;
-	}
-
-	prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
-	prepare_args->in_map_entries[0].resource_handle =
-		ctx_data->icp_dev_acquire_info->dev_type;
-	prepare_args->num_in_map_entries = 1;
-
-	return rc;
 }
 
 static int cam_icp_mgr_update_hfi_frame_process(
@@ -1263,7 +1181,7 @@
 	index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
 		ctx_data->hfi_frame_process.bits);
 	if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
-		pr_err("request idx is wrong: %d\n", index);
+		CAM_ERR(CAM_ICP, "request idx is wrong: %d", index);
 		mutex_unlock(&ctx_data->hfi_frame_process.lock);
 		return -EINVAL;
 	}
@@ -1272,8 +1190,7 @@
 
 	ctx_data->hfi_frame_process.request_id[index] =
 		packet->header.request_id;
-	ICP_DBG("slot[%d]: %lld\n", index,
-		ctx_data->hfi_frame_process.request_id[index]);
+
 	*idx = index;
 
 	return 0;
@@ -1293,7 +1210,7 @@
 		prepare_hw_update_args;
 
 	if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
-		pr_err("Invalid args\n");
+		CAM_ERR(CAM_ICP, "Invalid args");
 		return -EINVAL;
 	}
 
@@ -1301,7 +1218,7 @@
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	if (!ctx_data->in_use) {
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
-		pr_err("ctx is not in use\n");
+		CAM_ERR(CAM_ICP, "ctx is not in use");
 		return -EINVAL;
 	}
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
@@ -1322,10 +1239,8 @@
 	if (rc)
 		return rc;
 
-	rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+	cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
 		packet, prepare_args);
-	if (rc)
-		return rc;
 
 	rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
 		prepare_args, &idx);
@@ -1346,9 +1261,6 @@
 	prepare_args->num_hw_update_entries = 1;
 	prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
 	prepare_args->priv = &ctx_data->hfi_frame_process.request_id[idx];
-	ICP_DBG("slot : %d, hfi_cmd : %pK, request : %lld\n", idx,
-		(void *)hfi_cmd,
-		ctx_data->hfi_frame_process.request_id[idx]);
 
 	return rc;
 }
@@ -1385,14 +1297,14 @@
 	struct cam_icp_hw_ctx_data *ctx_data = NULL;
 
 	if (!release_hw || !hw_mgr) {
-		pr_err("Invalid args: %pK %pK\n", release_hw, hw_mgr);
+		CAM_ERR(CAM_ICP, "Invalid args: %pK %pK", release_hw, hw_mgr);
 		return -EINVAL;
 	}
 
 	ctx_data = release_hw->ctxt_to_hw_map;
 	ctx_id = ctx_data->ctx_id;
 	if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
-		pr_err("Invalid ctx id: %d\n", ctx_id);
+		CAM_ERR(CAM_ICP, "Invalid ctx id: %d", ctx_id);
 		return -EINVAL;
 	}
 
@@ -1430,8 +1342,7 @@
 		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
 
 	reinit_completion(&ctx_data->wait_complete);
-	ICP_DBG("Sending HFI_CMD_IPEBPS_ASYNC_COMMAND: opcode :%u\n",
-						ioconfig_cmd.opcode);
+
 	ioconfig_cmd.num_fw_handles = 1;
 	ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
 	ioconfig_cmd.payload.indirect = io_buf_addr;
@@ -1447,13 +1358,11 @@
 	if (rc)
 		return rc;
 
-	ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
-
 	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		ICP_DBG("FW response timed out %d\n", rc);
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
 	}
 
 	return rc;
@@ -1477,9 +1386,6 @@
 	create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
 	create_handle.handle_type = dev_type;
 	create_handle.user_data1 = (uint64_t)ctx_data;
-	ICP_DBG("%x %x %x %pK\n", create_handle.size,	create_handle.pkt_type,
-		create_handle.handle_type, (void *)create_handle.user_data1);
-	ICP_DBG("Sending HFI_CMD_IPEBPS_CREATE_HANDLE\n");
 
 	reinit_completion(&ctx_data->wait_complete);
 	task_data = (struct hfi_cmd_work_data *)task->payload;
@@ -1496,7 +1402,7 @@
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		ICP_DBG("FW response timed out %d\n", rc);
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
 	}
 
 	return rc;
@@ -1513,16 +1419,13 @@
 
 	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
 	if (!task) {
-		pr_err("No free task to send ping command\n");
+		CAM_ERR(CAM_ICP, "No free task to send ping command");
 		return -ENOMEM;
 	}
 
 	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
 	ping_pkt.pkt_type = HFI_CMD_SYS_PING;
 	ping_pkt.user_data = (uint64_t)ctx_data;
-	ICP_DBG("Sending HFI_CMD_SYS_PING\n");
-	ICP_DBG("%x %x %pK\n", ping_pkt.size,	ping_pkt.pkt_type,
-		(void *)ping_pkt.user_data);
 
 	init_completion(&ctx_data->wait_complete);
 	task_data = (struct hfi_cmd_work_data *)task->payload;
@@ -1540,7 +1443,7 @@
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		ICP_DBG("FW response timed out %d\n", rc);
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
 	}
 
 	return rc;
@@ -1561,7 +1464,7 @@
 		return -EFAULT;
 
 	if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
-		pr_err("num of out resources exceeding : %u\n",
+		CAM_ERR(CAM_ICP, "num of out resources exceeding : %u",
 			icp_dev_acquire_info.num_out_res);
 		return -EINVAL;
 	}
@@ -1580,7 +1483,7 @@
 		return -EFAULT;
 	}
 
-	ICP_DBG("%x %x %x %x %x %x %x\n",
+	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x",
 		ctx_data->icp_dev_acquire_info->dev_type,
 		ctx_data->icp_dev_acquire_info->in_res.format,
 		ctx_data->icp_dev_acquire_info->in_res.width,
@@ -1591,7 +1494,7 @@
 
 	p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
 	for (i = 0; i < ctx_data->icp_dev_acquire_info->num_out_res; i++)
-		ICP_DBG("out[i] %x %x %x %x\n",
+		CAM_DBG(CAM_ICP, "out[i] %x %x %x %x",
 			p_icp_out[i].format,
 			p_icp_out[i].width,
 			p_icp_out[i].height,
@@ -1612,20 +1515,21 @@
 	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
 
 	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
-		pr_err("Invalid params: %pK %pK\n", hw_mgr_priv,
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", hw_mgr_priv,
 			acquire_hw_args);
 		return -EINVAL;
 	}
 
 	if (args->num_acq > 1) {
-		pr_err("number of resources are wrong: %u\n", args->num_acq);
+		CAM_ERR(CAM_ICP, "number of resources is wrong: %u",
+			args->num_acq);
 		return -EINVAL;
 	}
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
 	if (ctx_id >= CAM_ICP_CTX_MAX) {
-		pr_err("No free ctx space in hw_mgr\n");
+		CAM_ERR(CAM_ICP, "No free ctx space in hw_mgr");
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return -ENOSPC;
 	}
@@ -1655,13 +1559,13 @@
 			&io_buf_addr, &io_buf_size);
 
 	if (rc) {
-		pr_err("unable to get src buf info from io desc\n");
+		CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
 		goto get_io_buf_failed;
 	}
-	ICP_DBG("io_config_cmd_handle : %d\n",
-		icp_dev_acquire_info->io_config_cmd_handle);
-	ICP_DBG("io_buf_addr : %pK\n", (void *)io_buf_addr);
-	ICP_DBG("io_buf_size : %zu\n", io_buf_size);
+
+	CAM_DBG(CAM_ICP, "hdl: %d, addr: %pK, size: %zu",
+		icp_dev_acquire_info->io_config_cmd_handle,
+		(void *)io_buf_addr, io_buf_size);
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	if (!hw_mgr->ctxt_cnt) {
@@ -1675,20 +1579,20 @@
 
 	rc = cam_icp_mgr_send_ping(ctx_data);
 	if (rc) {
-		pr_err("ping ack not received\n");
+		CAM_ERR(CAM_ICP, "ping ack not received");
 		goto send_ping_failed;
 	}
 
 	rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
 		ctx_data);
 	if (rc) {
-		pr_err("create handle failed\n");
+		CAM_ERR(CAM_ICP, "create handle failed");
 		goto create_handle_failed;
 	}
 
 	rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
 	if (rc) {
-		pr_err("IO Config command failed\n");
+		CAM_ERR(CAM_ICP, "IO Config command failed");
 		goto ioconfig_failed;
 	}
 
@@ -1711,7 +1615,7 @@
 		icp_dev_acquire_info, sizeof(struct cam_icp_acquire_dev_info)))
 		goto copy_to_user_failed;
 
-	ICP_DBG("scratch mem size = %x fw_handle = %x\n",
+	CAM_DBG(CAM_ICP, "scratch size = %x fw_handle = %x",
 			(unsigned int)icp_dev_acquire_info->scratch_mem_size,
 			(unsigned int)ctx_data->fw_handle);
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
@@ -1744,14 +1648,15 @@
 	struct cam_query_cap_cmd *query_cap = hw_caps_args;
 
 	if ((!hw_mgr_priv) || (!hw_caps_args)) {
-		pr_err("Invalid params: %pK %pK\n", hw_mgr_priv, hw_caps_args);
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK",
+			hw_mgr_priv, hw_caps_args);
 		return -EINVAL;
 	}
 
 	if (copy_from_user(&icp_hw_mgr.icp_caps,
 		(void __user *)query_cap->caps_handle,
 		sizeof(struct cam_icp_query_cap_cmd))) {
-		pr_err("copy_from_user failed\n");
+		CAM_ERR(CAM_ICP, "copy_from_user failed");
 		return -EFAULT;
 	}
 
@@ -1765,7 +1670,7 @@
 
 	if (copy_to_user((void __user *)query_cap->caps_handle,
 		&icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
-		pr_err("copy_to_user failed\n");
+		CAM_ERR(CAM_ICP, "copy_to_user failed");
 		rc = -EFAULT;
 		goto hfi_get_caps_fail;
 	}
@@ -1788,7 +1693,7 @@
 
 	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
 	if (!of_node || !hw_mgr_intf) {
-		pr_err("Invalid args of_node %pK hw_mgr %pK\n",
+		CAM_ERR(CAM_ICP, "Invalid args of_node %pK hw_mgr %pK",
 			of_node, hw_mgr_intf);
 		return -EINVAL;
 	}
@@ -1811,7 +1716,7 @@
 	/* Get number of device objects */
 	count = of_property_count_strings(of_node, "compat-hw-name");
 	if (!count) {
-		pr_err("no compat hw found in dev tree, count = %d\n", count);
+		CAM_ERR(CAM_ICP, "no compat hw found, count = %d", count);
 		rc = -EINVAL;
 		goto num_dev_failed;
 	}
@@ -1819,7 +1724,7 @@
 	/* Get number of a5 device nodes and a5 mem allocation */
 	rc = of_property_read_u32(of_node, "num-a5", &num_dev);
 	if (rc) {
-		pr_err("getting num of a5 failed\n");
+		CAM_ERR(CAM_ICP, "getting num of a5 failed");
 		goto num_dev_failed;
 	}
 
@@ -1833,7 +1738,7 @@
 	/* Get number of ipe device nodes and ipe mem allocation */
 	rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
 	if (rc) {
-		pr_err("getting number of ipe dev nodes failed\n");
+		CAM_ERR(CAM_ICP, "getting number of ipe dev nodes failed");
 		goto num_ipe_failed;
 	}
 
@@ -1847,7 +1752,7 @@
 	/* Get number of bps device nodes and bps mem allocation */
 	rc = of_property_read_u32(of_node, "num-bps", &num_dev);
 	if (rc) {
-		pr_err("read num bps devices failed\n");
+		CAM_ERR(CAM_ICP, "read num bps devices failed");
 		goto num_bps_failed;
 	}
 	icp_hw_mgr.devices[CAM_ICP_DEV_BPS] = kzalloc(
@@ -1861,20 +1766,20 @@
 		rc = of_property_read_string_index(of_node, "compat-hw-name",
 			i, &name);
 		if (rc) {
-			pr_err("getting dev object name failed\n");
+			CAM_ERR(CAM_ICP, "getting dev object name failed");
 			goto compat_hw_name_failed;
 		}
 
 		child_node = of_find_node_by_name(NULL, name);
 		if (!child_node) {
-			pr_err("error! Cannot find node in dtsi %s\n", name);
+			CAM_ERR(CAM_ICP, "Cannot find node in dtsi %s", name);
 			rc = -ENODEV;
 			goto compat_hw_name_failed;
 		}
 
 		child_pdev = of_find_device_by_node(child_node);
 		if (!child_pdev) {
-			pr_err("failed to find device on bus %s\n",
+			CAM_ERR(CAM_ICP, "failed to find device on bus %s",
 				child_node->name);
 			rc = -ENODEV;
 			of_node_put(child_node);
@@ -1884,13 +1789,10 @@
 		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
 			child_pdev);
 		if (!child_dev_intf) {
-			pr_err("no child device\n");
+			CAM_ERR(CAM_ICP, "no child device");
 			of_node_put(child_node);
 			goto compat_hw_name_failed;
 		}
-		ICP_DBG("child_intf %pK\n", child_dev_intf);
-		ICP_DBG("child type %d index %d\n",	child_dev_intf->hw_type,
-				child_dev_intf->hw_idx);
 
 		icp_hw_mgr.devices[child_dev_intf->hw_type]
 			[child_dev_intf->hw_idx] = child_dev_intf;
@@ -1900,27 +1802,27 @@
 
 	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
 	if (rc) {
-		pr_err("icp get iommu handle failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "icp get iommu handle failed: %d", rc);
 		goto compat_hw_name_failed;
 	}
 
 	rc = cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
 	if (rc) {
-		pr_err("icp attach failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "icp attach failed: %d", rc);
 		goto icp_attach_failed;
 	}
 
 	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
 		&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
 	if (rc) {
-		pr_err("unable to create a worker\n");
+		CAM_ERR(CAM_ICP, "unable to create a command worker");
 		goto cmd_work_failed;
 	}
 
 	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
 		&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
 	if (rc) {
-		pr_err("unable to create a worker\n");
+		CAM_ERR(CAM_ICP, "unable to create a message worker");
 		goto msg_work_failed;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 99b45aa..b7b3d7b 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "IPE-CORE %s:%d " fmt, __func__, __LINE__
-
 #include <linux/of.h>
 #include <linux/debugfs.h>
 #include <linux/videodev2.h>
@@ -30,6 +28,7 @@
 #include "cam_ipe_hw_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
 #include "cam_cpas_api.h"
+#include "cam_debug_util.h"
 
 static int cam_ipe_caps_vote(struct cam_ipe_device_core_info *core_info,
 	struct cam_icp_cpas_vote *cpas_vote)
@@ -44,7 +43,7 @@
 			&cpas_vote->axi_vote);
 
 	if (rc)
-		pr_err("cpas vote is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
 
 	return rc;
 }
@@ -59,7 +58,7 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid cam_dev_info\n");
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
 		return -EINVAL;
 	}
 
@@ -67,7 +66,8 @@
 	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
 
 	if ((!soc_info) || (!core_info)) {
-		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
 		return -EINVAL;
 	}
 
@@ -79,16 +79,16 @@
 	rc = cam_cpas_start(core_info->cpas_handle,
 		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
 	if (rc) {
-		pr_err("cpass start failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas start failed: %d", rc);
 		return rc;
 	}
 	core_info->cpas_start = true;
 
 	rc = cam_ipe_enable_soc_resources(soc_info);
 	if (rc) {
-		pr_err("soc enable is failed : %d\n", rc);
+		CAM_ERR(CAM_ICP, "soc enable is failed : %d", rc);
 		if (cam_cpas_stop(core_info->cpas_handle))
-			pr_err("cpas stop is failed\n");
+			CAM_ERR(CAM_ICP, "cpas stop failed");
 		else
 			core_info->cpas_start = false;
 	}
@@ -105,24 +105,25 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid cam_dev_info\n");
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
 		return -EINVAL;
 	}
 
 	soc_info = &ipe_dev->soc_info;
 	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
 	if ((!soc_info) || (!core_info)) {
-		pr_err("soc_info = %pK core_info = %pK\n", soc_info, core_info);
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
 		return -EINVAL;
 	}
 
 	rc = cam_ipe_disable_soc_resources(soc_info);
 	if (rc)
-		pr_err("soc disable is failed : %d\n", rc);
+		CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc);
 
 	if (core_info->cpas_start) {
 		if (cam_cpas_stop(core_info->cpas_handle))
-			pr_err("cpas stop is failed\n");
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
 		else
 			core_info->cpas_start = false;
 	}
@@ -140,12 +141,12 @@
 	int rc = 0;
 
 	if (!device_priv) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_ICP, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	if (cmd_type >= CAM_ICP_IPE_CMD_MAX) {
-		pr_err("Invalid command : %x\n", cmd_type);
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
index 0efb1de..d95246f 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
@@ -23,9 +23,7 @@
 #include "cam_icp_hw_intf.h"
 #include "cam_icp_hw_mgr_intf.h"
 #include "cam_cpas_api.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 struct cam_ipe_device_hw_info cam_ipe_hw_info = {
 	.reserved = 0,
@@ -47,7 +45,7 @@
 
 	rc = cam_cpas_register_client(&cpas_register_params);
 	if (rc < 0) {
-		pr_err("cam_cpas_register_client is failed: %d\n", rc);
+		CAM_ERR(CAM_ICP, "cpas register failed: %d", rc);
 		return rc;
 	}
 	core_info->cpas_handle = cpas_register_params.client_handle;
@@ -83,7 +81,7 @@
 	ipe_dev_intf->hw_ops.process_cmd = cam_ipe_process_cmd;
 	ipe_dev_intf->hw_type = CAM_ICP_DEV_IPE;
 
-	pr_debug("%s: type %d index %d\n", __func__,
+	CAM_DBG(CAM_ICP, "type %d index %d",
 		ipe_dev_intf->hw_type,
 		ipe_dev_intf->hw_idx);
 
@@ -101,7 +99,7 @@
 	match_dev = of_match_device(pdev->dev.driver->of_match_table,
 		&pdev->dev);
 	if (!match_dev) {
-		pr_debug("%s: No ipe hardware info\n", __func__);
+		CAM_ERR(CAM_ICP, "No ipe hardware info");
 		kfree(ipe_dev->core_info);
 		kfree(ipe_dev);
 		kfree(ipe_dev_intf);
@@ -114,14 +112,14 @@
 	rc = cam_ipe_init_soc_resources(&ipe_dev->soc_info, cam_ipe_irq,
 		ipe_dev);
 	if (rc < 0) {
-		pr_err("%s: failed to init_soc\n", __func__);
+		CAM_ERR(CAM_ICP, "failed to init_soc");
 		kfree(ipe_dev->core_info);
 		kfree(ipe_dev);
 		kfree(ipe_dev_intf);
 		return rc;
 	}
 
-	pr_debug("cam_ipe_init_soc_resources : %pK\n",
+	CAM_DBG(CAM_ICP, "soc info : %pK",
 		(void *)&ipe_dev->soc_info);
 	rc = cam_ipe_register_cpas(&ipe_dev->soc_info,
 		core_info, ipe_dev_intf->hw_idx);
@@ -136,7 +134,7 @@
 	spin_lock_init(&ipe_dev->hw_lock);
 	init_completion(&ipe_dev->hw_complete);
 
-	pr_debug("%s: IPE%d probe successful\n", __func__,
+	CAM_DBG(CAM_ICP, "IPE%d probe successful",
 		ipe_dev_intf->hw_idx);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
index e691dad..26dd6d2 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -18,9 +18,7 @@
 #include <media/cam_icp.h>
 #include "ipe_soc.h"
 #include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static int cam_ipe_get_dt_properties(struct cam_hw_soc_info *soc_info)
 {
@@ -28,7 +26,7 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0)
-		pr_err("get ipe dt prop is failed\n");
+		CAM_ERR(CAM_ICP, "get ipe dt prop is failed");
 
 	return rc;
 }
@@ -69,7 +67,7 @@
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_TURBO_VOTE, false);
 	if (rc) {
-		pr_err("%s: enable platform failed\n", __func__);
+		CAM_ERR(CAM_ICP, "enable platform failed");
 		return rc;
 	}
 
@@ -82,7 +80,7 @@
 
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
 	if (rc)
-		pr_err("%s: enable platform failed\n", __func__);
+		CAM_ERR(CAM_ICP, "disable platform failed");
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 766ea89..a6f60f5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -22,9 +22,7 @@
 #include "cam_sync_api.h"
 #include "cam_req_mgr_dev.h"
 #include "cam_trace.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
 {
@@ -57,7 +55,7 @@
 	case CAM_ISP_HW_EVENT_DONE:
 		break;
 	default:
-		CDBG("%s: Invalid Event Type %d\n", __func__, evt_id);
+		CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
 	}
 
 	return ts;
@@ -75,11 +73,11 @@
 	struct cam_context *ctx = ctx_isp->base;
 
 	if (list_empty(&ctx->active_req_list)) {
-		CDBG("Buf done with no active request!\n");
+		CAM_DBG(CAM_ISP, "Buf done with no active request!");
 		goto end;
 	}
 
-	CDBG("%s: Enter with bubble_state %d\n", __func__, bubble_state);
+	CAM_DBG(CAM_ISP, "Enter with bubble_state %d", bubble_state);
 
 	req = list_first_entry(&ctx->active_req_list,
 			struct cam_ctx_request, list);
@@ -95,29 +93,30 @@
 		}
 
 		if (j == req_isp->num_fence_map_out) {
-			pr_err("Can not find matching lane handle 0x%x!\n",
+			CAM_ERR(CAM_ISP,
+				"Can not find matching lane handle 0x%x!",
 				done->resource_handle[i]);
 			rc = -EINVAL;
 			continue;
 		}
 
 		if (!bubble_state) {
-			CDBG("%s: Sync with success: fd 0x%x\n", __func__,
+			CAM_DBG(CAM_ISP, "Sync with success: fd 0x%x",
 				   req_isp->fence_map_out[j].sync_id);
 			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS);
 			if (rc)
-				pr_err("%s: Sync failed with rc = %d\n",
-					__func__, rc);
+				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
+					 rc);
 
 		} else if (!req_isp->bubble_report) {
-			CDBG("%s: Sync with failure: fd 0x%x\n", __func__,
+			CAM_DBG(CAM_ISP, "Sync with failure: fd 0x%x",
 				   req_isp->fence_map_out[j].sync_id);
 			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_ERROR);
 			if (rc)
-				pr_err("%s: Sync failed with rc = %d\n",
-					__func__, rc);
+				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
+					 rc);
 		} else {
 			/*
 			 * Ignore the buffer done if bubble detect is on
@@ -130,7 +129,7 @@
 			continue;
 		}
 
-		CDBG("%s: req %lld, reset sync id 0x%x\n", __func__,
+		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x",
 			   req->request_id,
 			   req_isp->fence_map_out[j].sync_id);
 		req_isp->num_acked++;
@@ -141,8 +140,9 @@
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &ctx->free_req_list);
 		ctx_isp->active_req_cnt--;
-		CDBG("%s: Move active request %lld to free list(cnt = %d)\n",
-			__func__, req->request_id, ctx_isp->active_req_cnt);
+		CAM_DBG(CAM_ISP,
+			"Move active request %lld to free list(cnt = %d)",
+			 req->request_id, ctx_isp->active_req_cnt);
 	}
 
 end:
@@ -162,15 +162,17 @@
 	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
 	req_msg.u.frame_msg.sof_status = sof_event_status;
 
-	CDBG("%s: request id:%lld frame number:%lld SOF time stamp:0x%llx\n",
-		__func__, request_id, ctx_isp->frame_id,
+	CAM_DBG(CAM_ISP,
+		"request id:%lld frame number:%lld SOF time stamp:0x%llx",
+		 request_id, ctx_isp->frame_id,
 		ctx_isp->sof_timestamp_val);
-	CDBG("%s sof status:%d\n", __func__, sof_event_status);
+	CAM_DBG(CAM_ISP, "sof status:%d", sof_event_status);
 
 	if (cam_req_mgr_notify_frame_message(&req_msg,
 		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
-		pr_err("%s: Error in notifying the sof time for req id:%lld\n",
-				__func__, request_id);
+		CAM_ERR(CAM_ISP,
+			"Error in notifying the sof time for req id:%lld",
+			request_id);
 }
 
 static int __cam_isp_ctx_reg_upd_in_activated_state(
@@ -182,7 +184,7 @@
 	struct cam_isp_ctx_req  *req_isp;
 
 	if (list_empty(&ctx->pending_req_list)) {
-		pr_err("Reg upd ack with no pending request\n");
+		CAM_ERR(CAM_ISP, "Reg upd ack with no pending request");
 		goto end;
 	}
 	req = list_first_entry(&ctx->pending_req_list,
@@ -193,13 +195,14 @@
 	if (req_isp->num_fence_map_out != 0) {
 		list_add_tail(&req->list, &ctx->active_req_list);
 		ctx_isp->active_req_cnt++;
-		CDBG("%s: move request %lld to active list(cnt = %d)\n",
-			__func__, req->request_id, ctx_isp->active_req_cnt);
+		CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+			 req->request_id, ctx_isp->active_req_cnt);
 	} else {
 		/* no io config, so the request is completed. */
 		list_add_tail(&req->list, &ctx->free_req_list);
-		CDBG("%s: move active request %lld to free list(cnt = %d)\n",
-			__func__, req->request_id, ctx_isp->active_req_cnt);
+		CAM_DBG(CAM_ISP,
+			"move active request %lld to free list(cnt = %d)",
+			 req->request_id, ctx_isp->active_req_cnt);
 	}
 
 	/*
@@ -207,7 +210,7 @@
 	 * state so change substate here.
 	 */
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
-	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
 
 end:
 	return rc;
@@ -235,7 +238,7 @@
 		notify.frame_id = ctx_isp->frame_id;
 
 		ctx->ctx_crm_intf->notify_sof(&notify);
-		CDBG("%s: Notify CRM  SOF frame %lld\n", __func__,
+		CAM_DBG(CAM_ISP, "Notify CRM  SOF frame %lld",
 			ctx_isp->frame_id);
 
 		list_for_each_entry(req, &ctx->active_req_list, list) {
@@ -249,7 +252,7 @@
 		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
 			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 	} else {
-		pr_err("%s: Can not notify SOF to CRM\n", __func__);
+		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
 	}
 
 	return 0;
@@ -263,13 +266,13 @@
 	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
 
 	if (!evt_data) {
-		pr_err("%s: in valid sof event data\n", __func__);
+		CAM_ERR(CAM_ISP, "invalid sof event data");
 		return -EINVAL;
 	}
 
 	ctx_isp->frame_id++;
 	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
-	CDBG("%s: frame id: %lld time stamp:0x%llx\n", __func__,
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
 		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
 
 	return rc;
@@ -284,7 +287,7 @@
 	struct cam_context *ctx = ctx_isp->base;
 
 	if (ctx->state != CAM_CTX_ACTIVATED) {
-		CDBG("%s: invalid RUP\n", __func__);
+		CAM_DBG(CAM_ISP, "invalid RUP");
 		goto end;
 	}
 
@@ -303,8 +306,9 @@
 			/* need to handle the buf done */
 			list_add_tail(&req->list, &ctx->active_req_list);
 			ctx_isp->active_req_cnt++;
-			CDBG("%s: move request %lld to active list(cnt = %d)\n",
-				__func__, req->request_id,
+			CAM_DBG(CAM_ISP,
+				"move request %lld to active list(cnt = %d)",
+				 req->request_id,
 				ctx_isp->active_req_cnt);
 			ctx_isp->substate_activated =
 				CAM_ISP_CTX_ACTIVATED_EPOCH;
@@ -327,7 +331,7 @@
 		 * If no pending req in epoch, this is an error case.
 		 * The recovery is to go back to sof state
 		 */
-		pr_err("%s: No pending request\n", __func__);
+		CAM_ERR(CAM_ISP, "No pending request");
 		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
 
 		/* Send SOF event as empty frame*/
@@ -341,7 +345,7 @@
 		list);
 	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
 
-	CDBG("Report Bubble flag %d\n", req_isp->bubble_report);
+	CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
 	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
 		ctx->ctx_crm_intf->notify_err) {
 		struct cam_req_mgr_error_notify notify;
@@ -351,7 +355,7 @@
 		notify.req_id = req->request_id;
 		notify.error = CRM_KMD_ERR_BUBBLE;
 		ctx->ctx_crm_intf->notify_err(&notify);
-		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+		CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
 			ctx_isp->frame_id);
 	} else {
 		/*
@@ -361,8 +365,8 @@
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &ctx->active_req_list);
 		ctx_isp->active_req_cnt++;
-		CDBG("%s: move request %lld to active list(cnt = %d)\n",
-			__func__, req->request_id, ctx_isp->active_req_cnt);
+		CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+			 req->request_id, ctx_isp->active_req_cnt);
 		req_isp->bubble_report = 0;
 	}
 
@@ -371,7 +375,7 @@
 		CAM_REQ_MGR_SOF_EVENT_ERROR);
 
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
-	CDBG("%s: next substate %d\n", __func__,
+	CAM_DBG(CAM_ISP, "next substate %d",
 		ctx_isp->substate_activated);
 end:
 	return 0;
@@ -398,7 +402,7 @@
 	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
 
 	if (!evt_data) {
-		pr_err("%s: in valid sof event data\n", __func__);
+		CAM_ERR(CAM_ISP, "invalid sof event data");
 		return -EINVAL;
 	}
 
@@ -408,9 +412,9 @@
 	if (list_empty(&ctx->active_req_list))
 		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
 	else
-		CDBG("%s: Still need to wait for the buf done\n", __func__);
+		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
 
-	CDBG("%s: next substate %d\n", __func__,
+	CAM_DBG(CAM_ISP, "next substate %d",
 		ctx_isp->substate_activated);
 
 	return rc;
@@ -456,7 +460,7 @@
 		 * If no pending req in epoch, this is an error case.
 		 * Just go back to the bubble state.
 		 */
-		pr_err("%s: No pending request.\n", __func__);
+		CAM_ERR(CAM_ISP, "No pending request.");
 		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
 			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 
@@ -477,7 +481,7 @@
 		notify.req_id = req->request_id;
 		notify.error = CRM_KMD_ERR_BUBBLE;
 		ctx->ctx_crm_intf->notify_err(&notify);
-		CDBG("%s: Notify CRM about Bubble frame %lld\n", __func__,
+		CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
 			ctx_isp->frame_id);
 	} else {
 		/*
@@ -487,8 +491,8 @@
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &ctx->active_req_list);
 		ctx_isp->active_req_cnt++;
-		CDBG("%s: move request %lld to active list(cnt = %d)\n",
-			__func__, req->request_id, ctx_isp->active_req_cnt);
+		CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+			 req->request_id, ctx_isp->active_req_cnt);
 		req_isp->bubble_report = 0;
 	}
 
@@ -497,7 +501,7 @@
 		CAM_REQ_MGR_SOF_EVENT_ERROR);
 
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
-	CDBG("%s: next substate %d\n", __func__, ctx_isp->substate_activated);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
 end:
 	return 0;
 }
@@ -526,7 +530,7 @@
 
 	uint32_t error_type = error_event_data->error_type;
 
-	CDBG("%s: Enter error_type = %d\n", __func__, error_type);
+	CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
 	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
 		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
 		notify.error = CRM_KMD_ERR_FATAL;
@@ -538,7 +542,7 @@
 	 */
 
 	if (list_empty(&ctx->active_req_list)) {
-		pr_err("handling error with no active request!\n");
+		CAM_ERR(CAM_ISP, "handling error with no active request");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -552,10 +556,10 @@
 		notify.req_id = req->request_id;
 
 		ctx->ctx_crm_intf->notify_err(&notify);
-		pr_err("%s: Notify CRM about ERROR frame %lld\n", __func__,
+		CAM_ERR(CAM_ISP, "Notify CRM about ERROR frame %lld",
 			ctx_isp->frame_id);
 	} else {
-		pr_err("%s: Can not notify ERRROR to CRM\n", __func__);
+		CAM_ERR(CAM_ISP, "Can not notify ERROR to CRM");
 		rc = -EFAULT;
 	}
 
@@ -564,7 +568,7 @@
 	/* might need to check if active list is empty */
 
 end:
-	CDBG("%s: Exit\n", __func__);
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -641,8 +645,8 @@
 	struct cam_hw_config_args        cfg;
 
 	if (list_empty(&ctx->pending_req_list)) {
-		pr_err("%s: No available request for Apply id %lld\n",
-			__func__, apply->request_id);
+		CAM_ERR(CAM_ISP, "No available request for Apply id %lld",
+			apply->request_id);
 		rc = -EFAULT;
 		goto end;
 	}
@@ -655,8 +659,9 @@
 	 */
 	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
 	if (ctx_isp->active_req_cnt >=  2) {
-		CDBG("%s: Reject apply request due to congestion(cnt = %d)\n",
-				__func__, ctx_isp->active_req_cnt);
+		CAM_DBG(CAM_ISP,
+			"Reject apply request due to congestion(cnt = %d)",
+			ctx_isp->active_req_cnt);
 		rc = -EFAULT;
 		goto end;
 	}
@@ -673,7 +678,7 @@
 		goto end;
 	}
 
-	CDBG("%s: Apply request %lld\n", __func__, req->request_id);
+	CAM_DBG(CAM_ISP, "Apply request %lld", req->request_id);
 	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 
 	req_isp->bubble_report = apply->report_if_bubble;
@@ -684,11 +689,11 @@
 
 	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
 	if (rc) {
-		pr_err("%s: Can not apply the configuration\n", __func__);
+		CAM_ERR(CAM_ISP, "Can not apply the configuration");
 	} else {
 		spin_lock_bh(&ctx->lock);
 		ctx_isp->substate_activated = next_state;
-		CDBG("%s: new state %d\n", __func__, next_state);
+		CAM_DBG(CAM_ISP, "new state %d", next_state);
 		spin_unlock_bh(&ctx->lock);
 	}
 end:
@@ -702,11 +707,11 @@
 	struct cam_isp_context *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
 
-	CDBG("%s: current substate %d\n", __func__,
+	CAM_DBG(CAM_ISP, "current substate %d",
 		ctx_isp->substate_activated);
 	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
 		CAM_ISP_CTX_ACTIVATED_APPLIED);
-	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+	CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
 
 	return rc;
 }
@@ -718,11 +723,11 @@
 	struct cam_isp_context *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
 
-	CDBG("%s: current substate %d\n", __func__,
+	CAM_DBG(CAM_ISP, "current substate %d",
 		ctx_isp->substate_activated);
 	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
 		CAM_ISP_CTX_ACTIVATED_APPLIED);
-	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+	CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
 
 	return rc;
 }
@@ -734,11 +739,11 @@
 	struct cam_isp_context *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
 
-	CDBG("%s: current substate %d\n", __func__,
+	CAM_DBG(CAM_ISP, "current substate %d",
 		ctx_isp->substate_activated);
 	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
 		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
-	CDBG("%s: new substate %d\n", __func__, ctx_isp->substate_activated);
+	CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
 
 	return rc;
 }
@@ -755,7 +760,7 @@
 	spin_lock(&ctx->lock);
 	if (list_empty(req_list)) {
 		spin_unlock(&ctx->lock);
-		CDBG("%s: request list is empty\n", __func__);
+		CAM_DBG(CAM_ISP, "request list is empty");
 		return 0;
 	}
 
@@ -768,15 +773,15 @@
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		for (i = 0; i < req_isp->num_fence_map_out; i++) {
 			if (req_isp->fence_map_out[i].sync_id != -1) {
-				CDBG("%s: Flush req 0x%llx, fence %d\n",
-					__func__, req->request_id,
+				CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
+					 req->request_id,
 					req_isp->fence_map_out[i].sync_id);
 				rc = cam_sync_signal(
 					req_isp->fence_map_out[i].sync_id,
 					CAM_SYNC_STATE_SIGNALED_ERROR);
 				if (rc)
-					pr_err_ratelimited("%s: signal fence failed\n",
-						__func__);
+					CAM_ERR_RATE_LIMIT(CAM_ISP,
+						"signal fence failed");
 				req_isp->fence_map_out[i].sync_id = -1;
 			}
 		}
@@ -792,8 +797,9 @@
 
 	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
 		!cancel_req_id_found)
-		CDBG("%s:Flush request id:%lld is not found in the list\n",
-			__func__, flush_req->req_id);
+		CAM_DBG(CAM_ISP,
+			"Flush request id:%lld is not found in the list",
+			flush_req->req_id);
 
 	return 0;
 }
@@ -804,10 +810,10 @@
 {
 	int rc = 0;
 
-	CDBG("%s: try to flush pending list\n", __func__);
+	CAM_DBG(CAM_ISP, "try to flush pending list");
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
-	CDBG("%s: Flush request in top state %d\n",
-		__func__, ctx->state);
+	CAM_DBG(CAM_ISP, "Flush request in top state %d",
+		 ctx->state);
 	return rc;
 }
 
@@ -817,7 +823,7 @@
 {
 	int rc = 0;
 
-	CDBG("%s: try to flush pending list\n", __func__);
+	CAM_DBG(CAM_ISP, "try to flush pending list");
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
 
 	/* if nothing is in pending req list, change state to acquire*/
@@ -828,8 +834,8 @@
 
 	trace_cam_context_state("ISP", ctx);
 
-	CDBG("%s: Flush request in ready state. next state %d\n",
-		__func__, ctx->state);
+	CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
+		 ctx->state);
 	return rc;
 }
 
@@ -879,6 +885,412 @@
 	},
 };
 
+static int __cam_isp_ctx_rdi_only_sof_in_top_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_context                    *ctx = ctx_isp->base;
+	struct cam_req_mgr_sof_notify          notify;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+	uint64_t                               request_id  = 0;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	/*
+	 * Notify reqmgr with the SOF signal. Note that, due to scheduling
+	 * delays, two active requests may already be in the active queue
+	 * while we try to do the notification. In this case we need to skip
+	 * the current notification, which helps the state machine catch up
+	 * on the delay.
+	 */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof &&
+		ctx_isp->active_req_cnt <= 2) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+
+		ctx->ctx_crm_intf->notify_sof(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM  SOF frame %lld",
+			ctx_isp->frame_id);
+
+		/*
+		 * This is an idle frame without any applied request id, so
+		 * send the request id as zero
+		 */
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else {
+		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+	}
+
+	if (list_empty(&ctx->active_req_list))
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	else
+		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+
+	CAM_DBG(CAM_ISP, "next substate %d",
+		ctx_isp->substate_activated);
+	return rc;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+	return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+	uint64_t  request_id = 0;
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/*
+		 * If no pending req in epoch, this is an error case.
+		 * The recovery is to go back to sof state
+		 */
+		CAM_ERR(CAM_ISP, "No pending request");
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+		/* Send SOF event as empty frame*/
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+
+	CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
+			ctx_isp->frame_id);
+	} else {
+		/*
+		 * Since the bubble cannot be reported, always move the
+		 * request to the active list.
+		 */
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->active_req_list);
+		ctx_isp->active_req_cnt++;
+		CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+			req->request_id, ctx_isp->active_req_cnt);
+		req_isp->bubble_report = 0;
+	}
+
+	request_id = req->request_id;
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_ERROR);
+
+	/* change the state to bubble, as reg update has not come */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+end:
+	return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	uint32_t i;
+	struct cam_ctx_request                *req;
+	struct cam_context                    *ctx = ctx_isp->base;
+	struct cam_req_mgr_sof_notify          notify;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+	struct cam_isp_ctx_req                *req_isp;
+	uint64_t                               request_id  = 0;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "invalid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+	/*
+	 * Signal all active requests with an error and move all the
+	 * active requests to the free list
+	 */
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+			req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	/* notify reqmgr with sof signal */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+
+		ctx->ctx_crm_intf->notify_sof(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM  SOF frame %lld",
+			ctx_isp->frame_id);
+
+	} else {
+		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+	}
+
+	/*
+	 * This is an idle frame without any applied request id, so send
+	 * the request id as zero
+	 */
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+	CAM_DBG(CAM_ISP, "next substate %d",
+		ctx_isp->substate_activated);
+
+	return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request  *req;
+	struct cam_context      *ctx = ctx_isp->base;
+	struct cam_isp_ctx_req  *req_isp;
+	struct cam_req_mgr_sof_notify  notify;
+	uint64_t  request_id  = 0;
+
+	/* notify reqmgr with sof signal*/
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_sof) {
+		if (list_empty(&ctx->pending_req_list)) {
+			CAM_ERR(CAM_ISP, "Reg upd ack with no pending request");
+			goto error;
+		}
+		req = list_first_entry(&ctx->pending_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		request_id = req->request_id;
+		if (req_isp->num_fence_map_out != 0) {
+			list_add_tail(&req->list, &ctx->active_req_list);
+			ctx_isp->active_req_cnt++;
+			CAM_DBG(CAM_ISP,
+				"move request %lld to active list(cnt = %d)",
+				req->request_id, ctx_isp->active_req_cnt);
+		} else {
+			/* no io config, so the request is completed. */
+			list_add_tail(&req->list, &ctx->free_req_list);
+			CAM_DBG(CAM_ISP,
+				"move active req %lld to free list(cnt=%d)",
+				req->request_id, ctx_isp->active_req_cnt);
+		}
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+
+		ctx->ctx_crm_intf->notify_sof(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM  SOF frame %lld",
+			ctx_isp->frame_id);
+	} else {
+		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+	}
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+	return 0;
+error:
+	/* Send SOF event as idle frame*/
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	/*
+	 * There is no request in the pending list, so move the sub state
+	 * machine to the SOF sub state
+	 */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+	return 0;
+}
+
+static struct cam_isp_ctx_irq_ops
+	cam_isp_ctx_rdi_only_activated_state_machine_irq
+			[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_rdi_only_sof_in_top_state,
+			__cam_isp_ctx_reg_upd_in_sof,
+			NULL,
+			NULL,
+			NULL,
+		},
+	},
+	/* APPLIED */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_applied_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
+	/* EPOCH */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_top_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_epoch,
+		},
+	},
+	/* BUBBLE*/
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_bubble_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble,
+		},
+	},
+	/* BUBBLE APPLIED, i.e. PRE_BUBBLE */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_bubble_applied,
+			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble_applied,
+		},
+	},
+
+	/* HALT */
+	{
+	},
+};
+
+static int __cam_isp_ctx_rdi_only_apply_req_top_state(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static struct cam_ctx_ops
+	cam_isp_ctx_rdi_only_activated_state_machine
+		[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+		},
+		.irq_ops = NULL,
+	},
+	/* APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* EPOCH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+		},
+		.irq_ops = NULL,
+	},
+	/* PRE BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HALT */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
 
 /* top level state machine */
 static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
@@ -910,20 +1322,20 @@
 	 * But we still add some sanity check code here to help the debug
 	 */
 	if (!list_empty(&ctx->active_req_list))
-		pr_err("%s: Active list is not empty\n", __func__);
+		CAM_ERR(CAM_ISP, "Active list is not empty");
 
 	/* Flush all the pending request list  */
 	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
 	flush_req.link_hdl = ctx->link_hdl;
 	flush_req.dev_hdl = ctx->dev_hdl;
 
-	CDBG("%s: try to flush pending list\n", __func__);
+	CAM_DBG(CAM_ISP, "try to flush pending list");
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
 
 	ctx->state = CAM_CTX_AVAILABLE;
 
 	trace_cam_context_state("ISP", ctx);
-	CDBG("%s: next state %d\n", __func__, ctx->state);
+	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
 	return rc;
 }
 
@@ -941,7 +1353,7 @@
 	struct cam_isp_context           *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
 
-	CDBG("%s: get free request object......\n", __func__);
+	CAM_DBG(CAM_ISP, "get free request object......");
 
 	/* get free request */
 	spin_lock_bh(&ctx->lock);
@@ -953,7 +1365,7 @@
 	spin_unlock_bh(&ctx->lock);
 
 	if (!req) {
-		pr_err("%s: No more request obj free\n", __func__);
+		CAM_ERR(CAM_ISP, "No more request obj free");
 		rc = -ENOMEM;
 		goto end;
 	}
@@ -965,20 +1377,20 @@
 	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
 		(uint64_t *) &packet_addr, &len);
 	if (rc != 0) {
-		pr_err("%s: Can not get packet address\n", __func__);
+		CAM_ERR(CAM_ISP, "Can not get packet address");
 		rc = -EINVAL;
 		goto free_req;
 	}
 
 	packet = (struct cam_packet *) (packet_addr + cmd->offset);
-	CDBG("%s: pack_handle %llx\n", __func__, cmd->packet_handle);
-	CDBG("%s: packet address is 0x%llx\n", __func__, packet_addr);
-	CDBG("%s: packet with length %zu, offset 0x%llx\n", __func__,
+	CAM_DBG(CAM_ISP, "pack_handle %llx", cmd->packet_handle);
+	CAM_DBG(CAM_ISP, "packet address is 0x%llx", packet_addr);
+	CAM_DBG(CAM_ISP, "packet with length %zu, offset 0x%llx",
 		len, cmd->offset);
-	CDBG("%s: Packet request id %lld\n", __func__,
+	CAM_DBG(CAM_ISP, "Packet request id %lld",
 		packet->header.request_id);
-	CDBG("%s: Packet size 0x%x\n", __func__, packet->header.size);
-	CDBG("%s: packet op %d\n", __func__, packet->header.op_code);
+	CAM_DBG(CAM_ISP, "Packet size 0x%x", packet->header.size);
+	CAM_DBG(CAM_ISP, "packet op %d", packet->header.op_code);
 
 	/* preprocess the configuration */
 	memset(&cfg, 0, sizeof(cfg));
@@ -991,13 +1403,12 @@
 	cfg.out_map_entries = req_isp->fence_map_out;
 	cfg.in_map_entries = req_isp->fence_map_in;
 
-	CDBG("%s: try to prepare config packet......\n", __func__);
+	CAM_DBG(CAM_ISP, "try to prepare config packet......");
 
 	rc = ctx->hw_mgr_intf->hw_prepare_update(
 		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
 	if (rc != 0) {
-		pr_err("%s: Prepare config packet failed in HW layer\n",
-			__func__);
+		CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
 		rc = -EFAULT;
 		goto free_req;
 	}
@@ -1006,8 +1417,8 @@
 	req_isp->num_fence_map_in = cfg.num_in_map_entries;
 	req_isp->num_acked = 0;
 
-	CDBG("%s: num_entry: %d, num fence out: %d, num fence in: %d\n",
-		__func__, req_isp->num_cfg, req_isp->num_fence_map_out,
+	CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d",
+		 req_isp->num_cfg, req_isp->num_fence_map_out,
 		req_isp->num_fence_map_in);
 
 	req->request_id = packet->header.request_id;
@@ -1019,20 +1430,20 @@
 		add_req.req_id   = req->request_id;
 		rc = ctx->ctx_crm_intf->add_req(&add_req);
 		if (rc) {
-			pr_err("%s: Error: Adding request id=%llu\n", __func__,
+			CAM_ERR(CAM_ISP, "Error: Adding request id=%llu",
 				req->request_id);
 				goto free_req;
 		}
 	}
 
-	CDBG("%s: Packet request id 0x%llx\n", __func__,
+	CAM_DBG(CAM_ISP, "Packet request id 0x%llx",
 		packet->header.request_id);
 
 	spin_lock_bh(&ctx->lock);
 	list_add_tail(&req->list, &ctx->pending_req_list);
 	spin_unlock_bh(&ctx->lock);
 
-	CDBG("%s: Preprocessing Config %lld successful\n", __func__,
+	CAM_DBG(CAM_ISP, "Preprocessing Config %lld successful",
 		req->request_id);
 
 	return rc;
@@ -1055,26 +1466,28 @@
 	struct cam_hw_release_args       release;
 	struct cam_isp_context          *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_isp_hw_cmd_args       hw_cmd_args;
 
 	if (!ctx->hw_mgr_intf) {
-		pr_err("HW interface is not ready!\n");
+		CAM_ERR(CAM_ISP, "HW interface is not ready");
 		rc = -EFAULT;
 		goto end;
 	}
 
-	CDBG("%s: session_hdl 0x%x, num_resources %d, hdl type %d, res %lld\n",
-		 __func__, cmd->session_handle, cmd->num_resources,
+	CAM_DBG(CAM_ISP,
+		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
+		cmd->session_handle, cmd->num_resources,
 		cmd->handle_type, cmd->resource_hdl);
 
 	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
-		pr_err("Too much resources in the acquire!\n");
+		CAM_ERR(CAM_ISP, "Too many resources in the acquire");
 		rc = -ENOMEM;
 		goto end;
 	}
 
 	/* for now we only support user pointer */
 	if (cmd->handle_type != 1)  {
-		pr_err("%s: Only user pointer is supported!", __func__);
+		CAM_ERR(CAM_ISP, "Only user pointer is supported");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1086,8 +1499,8 @@
 		goto end;
 	}
 
-	CDBG("%s: start copy %d resources from user\n",
-		__func__, cmd->num_resources);
+	CAM_DBG(CAM_ISP, "start copy %d resources from user",
+		 cmd->num_resources);
 
 	if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
 		sizeof(*isp_res)*cmd->num_resources)) {
@@ -1104,10 +1517,39 @@
 	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
 		&param);
 	if (rc != 0) {
-		pr_err("Acquire device failed\n");
+		CAM_ERR(CAM_ISP, "Acquire device failed");
 		goto free_res;
 	}
 
+	/* Query whether the context has only RDI resources */
+	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
+	hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+				&hw_cmd_args);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "HW command failed");
+		goto free_hw;
+	}
+
+	if (hw_cmd_args.u.is_rdi_only_context) {
+		/*
+		 * This context has only RDI resources, so assign the
+		 * RDI-only state machine
+		 */
+		CAM_DBG(CAM_ISP, "RDI only session Context");
+
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_rdi_only_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_rdi_only_activated_state_machine;
+	} else {
+		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_activated_state_machine;
+	}
+
 	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
 
 	req_hdl_param.session_hdl = cmd->session_handle;
@@ -1117,11 +1559,11 @@
 	req_hdl_param.ops = ctx->crm_ctx_intf;
 	req_hdl_param.priv = ctx;
 
-	CDBG("%s: get device handle form bridge\n", __func__);
+	CAM_DBG(CAM_ISP, "get device handle from bridge");
 	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
 	if (ctx->dev_hdl <= 0) {
 		rc = -EFAULT;
-		pr_err("Can not create device handle\n");
+		CAM_ERR(CAM_ISP, "Can not create device handle");
 		goto free_hw;
 	}
 	cmd->dev_handle = ctx->dev_hdl;
@@ -1132,7 +1574,7 @@
 	ctx->state = CAM_CTX_ACQUIRED;
 
 	trace_cam_context_state("ISP", ctx);
-	CDBG("%s:%d: Acquire success.\n", __func__, __LINE__);
+	CAM_DBG(CAM_ISP, "Acquire success.");
 	kfree(isp_res);
 	return rc;
 
@@ -1158,7 +1600,7 @@
 		trace_cam_context_state("ISP", ctx);
 	}
 
-	CDBG("%s: next state %d\n", __func__, ctx->state);
+	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
 	return rc;
 }
 
@@ -1167,7 +1609,7 @@
 {
 	int rc = 0;
 
-	CDBG("%s:%d: Enter.........\n", __func__, __LINE__);
+	CAM_DBG(CAM_ISP, "Enter.........");
 
 	ctx->link_hdl = link->link_hdl;
 	ctx->ctx_crm_intf = link->crm_cb;
@@ -1178,7 +1620,7 @@
 		trace_cam_context_state("ISP", ctx);
 	}
 
-	CDBG("%s: next state %d\n", __func__, ctx->state);
+	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
 
 	return rc;
 }
@@ -1225,8 +1667,7 @@
 
 	if (list_empty(&ctx->pending_req_list)) {
 		/* should never happen */
-		pr_err("%s: Start device with empty configuration\n",
-			__func__);
+		CAM_ERR(CAM_ISP, "Start device with empty configuration");
 		rc = -EFAULT;
 		goto end;
 	} else {
@@ -1236,8 +1677,7 @@
 	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 
 	if (!ctx_isp->hw_ctx) {
-		pr_err("%s:%d: Wrong hw context pointer.\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
 		rc = -EFAULT;
 		goto end;
 	}
@@ -1260,12 +1700,12 @@
 	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
 	if (rc) {
 		/* HW failure. user need to clean up the resource */
-		pr_err("Start HW failed\n");
+		CAM_ERR(CAM_ISP, "Start HW failed");
 		ctx->state = CAM_CTX_READY;
 		trace_cam_context_state("ISP", ctx);
 		goto end;
 	}
-	CDBG("%s: start device success\n", __func__);
+	CAM_DBG(CAM_ISP, "start device success");
 end:
 	return rc;
 }
@@ -1298,7 +1738,7 @@
 	spin_lock_bh(&ctx->lock);
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
 	spin_unlock_bh(&ctx->lock);
-	CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
 
 	/* stop hw first */
 	if (ctx_isp->hw_ctx) {
@@ -1312,8 +1752,8 @@
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
-		CDBG("%s: signal fence in pending list. fence num %d\n",
-			__func__, req_isp->num_fence_map_out);
+		CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
+			 req_isp->num_fence_map_out);
 		for (i = 0; i < req_isp->num_fence_map_out; i++)
 			if (req_isp->fence_map_out[i].sync_id != -1) {
 				cam_sync_signal(
@@ -1328,8 +1768,8 @@
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
-		CDBG("%s: signal fence in active list. fence num %d\n",
-			__func__, req_isp->num_fence_map_out);
+		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+			 req_isp->num_fence_map_out);
 		for (i = 0; i < req_isp->num_fence_map_out; i++)
 			if (req_isp->fence_map_out[i].sync_id != -1) {
 				cam_sync_signal(
@@ -1342,7 +1782,7 @@
 	ctx_isp->active_req_cnt = 0;
 	ctx_isp->reported_req_id = 0;
 
-	CDBG("%s: next state %d", __func__, ctx->state);
+	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
 	return rc;
 }
 
@@ -1394,21 +1834,21 @@
 		(struct cam_isp_context *) ctx->ctx_priv;
 
 	trace_cam_apply_req("ISP", apply);
-	CDBG("%s: Enter: apply req in Substate %d request _id:%lld\n",
-		__func__, ctx_isp->substate_activated, apply->request_id);
+	CAM_DBG(CAM_ISP, "Enter: apply req in Substate %d request _id:%lld",
+		 ctx_isp->substate_activated, apply->request_id);
 	if (ctx_isp->substate_machine[ctx_isp->substate_activated].
 		crm_ops.apply_req) {
 		rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
 			crm_ops.apply_req(ctx, apply);
 	} else {
-		pr_err("%s: No handle function in activated substate %d\n",
-			__func__, ctx_isp->substate_activated);
+		CAM_ERR(CAM_ISP, "No handle function in activated substate %d",
+			 ctx_isp->substate_activated);
 		rc = -EFAULT;
 	}
 
 	if (rc)
-		pr_err("%s: Apply failed in active substate %d\n",
-			__func__, ctx_isp->substate_activated);
+		CAM_ERR(CAM_ISP, "Apply failed in active substate %d",
+			 ctx_isp->substate_activated);
 	return rc;
 }
 
@@ -1427,18 +1867,18 @@
 	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
 		__cam_isp_ctx_get_event_ts(evt_id, evt_data));
 
-	CDBG("%s: Enter: State %d, Substate %d, evt id %d\n",
-		__func__, ctx->state, ctx_isp->substate_activated, evt_id);
+	CAM_DBG(CAM_ISP, "Enter: State %d, Substate %d, evt id %d",
+		 ctx->state, ctx_isp->substate_activated, evt_id);
 	if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
 		irq_ops[evt_id]) {
 		rc = ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
 			irq_ops[evt_id](ctx_isp, evt_data);
 	} else {
-		CDBG("%s: No handle function for substate %d\n", __func__,
+		CAM_DBG(CAM_ISP, "No handle function for substate %d",
 			ctx_isp->substate_activated);
 	}
-	CDBG("%s: Exit: State %d Substate %d\n",
-		__func__, ctx->state, ctx_isp->substate_activated);
+	CAM_DBG(CAM_ISP, "Exit: State %d Substate %d",
+		 ctx->state, ctx_isp->substate_activated);
 	spin_unlock_bh(&ctx->lock);
 	return rc;
 }
@@ -1513,7 +1953,7 @@
 	int i;
 
 	if (!ctx || !ctx_base) {
-		pr_err("%s: Invalid Context\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid Context");
 		goto err;
 	}
 
@@ -1538,7 +1978,7 @@
 	rc = cam_context_init(ctx_base, crm_node_intf, hw_intf, ctx->req_base,
 		CAM_CTX_REQ_MAX);
 	if (rc) {
-		pr_err("%s: Camera Context Base init failed\n", __func__);
+		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
 		goto err;
 	}
 
@@ -1558,7 +1998,7 @@
 		cam_context_deinit(ctx->base);
 
 	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
-		pr_err("%s: ISP context substate is invalid\n", __func__);
+		CAM_ERR(CAM_ISP, "ISP context substate is invalid");
 
 	memset(ctx, 0, sizeof(*ctx));
 	return rc;
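The RDI-only tables added above reuse the existing table-driven dispatch: the acquire path stores either the PIX or the RDI-only table in ctx_isp->substate_machine and ctx_isp->substate_machine_irq, and the activated-state entry points simply index the selected table by the current substate. The sketch below mirrors the activated-state IRQ handler in the hunk above, with locking and tracing omitted; the function name is hypothetical.

/*
 * Simplified sketch of the substate IRQ dispatch; field and struct names
 * follow the driver, but the function name and trimmed body are only
 * illustrative.
 */
static int isp_ctx_dispatch_irq_sketch(struct cam_isp_context *ctx_isp,
	uint32_t evt_id, void *evt_data)
{
	struct cam_isp_ctx_irq_ops *irq_table =
		&ctx_isp->substate_machine_irq[ctx_isp->substate_activated];

	if (!irq_table->irq_ops[evt_id]) {
		CAM_DBG(CAM_ISP, "No handler for substate %d evt %d",
			ctx_isp->substate_activated, evt_id);
		return 0;
	}

	/*
	 * For example, a SOF event in the RDI-only SOF substate lands in
	 * __cam_isp_ctx_rdi_only_sof_in_top_state().
	 */
	return irq_table->irq_ops[evt_id](ctx_isp, evt_data);
}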
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
index 4c819cf..2bf7795 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -25,6 +25,7 @@
 #include "cam_hw_mgr_intf.h"
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_node.h"
+#include "cam_debug_util.h"
 
 static struct cam_isp_dev g_isp_dev;
 
@@ -44,13 +45,13 @@
 	for (i = 0; i < CAM_CTX_MAX; i++) {
 		rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
 		if (rc)
-			pr_err("%s: ISP context %d deinit failed\n",
-				__func__, i);
+			CAM_ERR(CAM_ISP, "ISP context %d deinit failed",
+				 i);
 	}
 
 	rc = cam_subdev_remove(&g_isp_dev.sd);
 	if (rc)
-		pr_err("%s: Unregister failed\n", __func__);
+		CAM_ERR(CAM_ISP, "Unregister failed");
 
 	memset(&g_isp_dev, 0, sizeof(g_isp_dev));
 	return 0;
@@ -67,7 +68,7 @@
 	rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
 		CAM_IFE_DEVICE_TYPE);
 	if (rc) {
-		pr_err("%s: ISP cam_subdev_probe failed!\n", __func__);
+		CAM_ERR(CAM_ISP, "ISP cam_subdev_probe failed!");
 		goto err;
 	}
 	node = (struct cam_node *) g_isp_dev.sd.token;
@@ -75,7 +76,7 @@
 	memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
 	rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
 	if (rc != 0) {
-		pr_err("%s: Can not initialized ISP HW manager!\n", __func__);
+		CAM_ERR(CAM_ISP, "Can not initialize ISP HW manager!");
 		goto unregister;
 	}
 
@@ -85,7 +86,7 @@
 			&node->crm_node_intf,
 			&node->hw_mgr_intf);
 		if (rc) {
-			pr_err("%s: ISP context init failed!\n", __func__);
+			CAM_ERR(CAM_ISP, "ISP context init failed!");
 			goto unregister;
 		}
 	}
@@ -93,11 +94,11 @@
 	rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
 		CAM_ISP_DEV_NAME);
 	if (rc) {
-		pr_err("%s: ISP node init failed!\n", __func__);
+		CAM_ERR(CAM_ISP, "ISP node init failed!");
 		goto unregister;
 	}
 
-	pr_info("%s: Camera ISP probe complete\n", __func__);
+	CAM_INFO(CAM_ISP, "Camera ISP probe complete");
 
 	return 0;
 unregister:
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index cb38d8f..4a5b1c3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -24,9 +24,7 @@
 #include "cam_ife_hw_mgr.h"
 #include "cam_cdm_intf_api.h"
 #include "cam_packet_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 #define CAM_IFE_HW_ENTRIES_MAX  20
 
@@ -41,7 +39,7 @@
 	struct cam_query_cap_cmd          *query = hw_caps_args;
 	struct cam_isp_query_cap_cmd       query_isp;
 
-	CDBG("%s: enter\n", __func__);
+	CAM_DBG(CAM_ISP, "enter");
 
 	if (copy_from_user(&query_isp, (void __user *)query->caps_handle,
 		sizeof(struct cam_isp_query_cap_cmd))) {
@@ -66,7 +64,7 @@
 		sizeof(struct cam_isp_query_cap_cmd)))
 		rc = -EFAULT;
 
-	CDBG("%s: exit rc :%d !\n", __func__, rc);
+	CAM_DBG(CAM_ISP, "exit rc :%d !", rc);
 
 	return rc;
 }
@@ -100,7 +98,7 @@
 		if (!isp_hw_res->hw_res[i])
 			continue;
 		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
-		CDBG("%s: enabled vfe hardware %d\n", __func__,
+		CAM_DBG(CAM_ISP, "enabled vfe hardware %d",
 			hw_intf->hw_idx);
 		if (hw_intf->hw_ops.init) {
 			rc = hw_intf->hw_ops.init(hw_intf->hw_priv,
@@ -113,7 +111,7 @@
 
 	return 0;
 err:
-	pr_err("%s: INIT HW res failed! (type:%d, id:%d)", __func__,
+	CAM_ERR(CAM_ISP, "INIT HW res failed! (type:%d, id:%d)",
 		isp_hw_res->res_type, isp_hw_res->res_id);
 	return rc;
 }
@@ -134,19 +132,18 @@
 				isp_hw_res->hw_res[i],
 				sizeof(struct cam_isp_resource_node));
 			if (rc) {
-				pr_err("%s: Can not start HW resources!\n",
-					__func__);
+				CAM_ERR(CAM_ISP, "Can not start HW resources!");
 				goto err;
 			}
 		} else {
-			pr_err("%s:function null\n", __func__);
+			CAM_ERR(CAM_ISP, "function null");
 			goto err;
 		}
 	}
 
 	return 0;
 err:
-	pr_err("%s: Start hw res failed! (type:%d, id:%d)", __func__,
+	CAM_ERR(CAM_ISP, "Start hw res failed! (type:%d, id:%d)",
 		isp_hw_res->res_type, isp_hw_res->res_id);
 	return rc;
 }
@@ -166,7 +163,7 @@
 				isp_hw_res->hw_res[i],
 				sizeof(struct cam_isp_resource_node));
 		else
-			pr_err("%s:stop null\n", __func__);
+			CAM_ERR(CAM_ISP, "stop null");
 	}
 }
 
@@ -213,7 +210,7 @@
 			struct cam_ife_hw_mgr_res, list);
 		list_del_init(&res_ptr->list);
 	} else {
-		pr_err("No more free ife hw mgr ctx!\n");
+		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
 		rc = -1;
 	}
 	*res = res_ptr;
@@ -237,11 +234,12 @@
 				isp_hw_res->hw_res[i],
 				sizeof(struct cam_isp_resource_node));
 			if (rc)
-				pr_err("%s:Release hw resrouce id %d failed!\n",
-					__func__, isp_hw_res->res_id);
+				CAM_ERR(CAM_ISP,
+					"Release hw resource id %d failed!",
+					isp_hw_res->res_id);
 			isp_hw_res->hw_res[i] = NULL;
 		} else
-			pr_err("%s:Release null\n", __func__);
+			CAM_ERR(CAM_ISP, "Release null");
 	}
 	/* caller should make sure the resource is in a list */
 	list_del_init(&isp_hw_res->list);
@@ -328,8 +326,8 @@
 	ife_ctx->common.cb_priv = NULL;
 	memset(ife_ctx->common.event_cb, 0, sizeof(ife_ctx->common.event_cb));
 
-	CDBG("%s:%d: release context completed ctx id:%d\n",
-		__func__, __LINE__, ife_ctx->ctx_index);
+	CAM_DBG(CAM_ISP, "release context completed ctx id:%d",
+		ife_ctx->ctx_index);
 
 	return 0;
 }
@@ -364,7 +362,7 @@
 			struct cam_ife_hw_mgr_ctx, list);
 		list_del_init(&ctx_ptr->list);
 	} else {
-		pr_err("No more free ife hw mgr ctx!\n");
+		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
 		rc = -1;
 	}
 	*ife_ctx = ctx_ptr;
@@ -381,7 +379,7 @@
 	uint32_t    i;
 
 	if (!ctx->num_base) {
-		CDBG("%s: Add split id = %d for base idx = %d\n", __func__,
+		CAM_DBG(CAM_ISP, "Add split id = %d for base idx = %d",
 			split_id, base_idx);
 		ctx->base[0].split_id = split_id;
 		ctx->base[0].idx      = base_idx;
@@ -400,8 +398,8 @@
 		}
 
 		if (i == CAM_IFE_HW_NUM_MAX) {
-			CDBG("%s: Add split id = %d for base idx = %d\n",
-				__func__, split_id, base_idx);
+			CAM_DBG(CAM_ISP, "Add split id = %d for base idx = %d",
+				 split_id, base_idx);
 			ctx->base[ctx->num_base].split_id = split_id;
 			ctx->base[ctx->num_base].idx      = base_idx;
 			ctx->num_base++;
@@ -417,7 +415,7 @@
 	uint32_t i;
 
 	if (list_empty(&ctx->res_list_ife_src)) {
-		pr_err("%s: Error! Mux List empty\n", __func__);
+		CAM_ERR(CAM_ISP, "Error! Mux List empty");
 		return -ENODEV;
 	}
 
@@ -440,7 +438,7 @@
 						res->hw_intf->hw_idx);
 		}
 	}
-	CDBG("%s: ctx base num = %d\n", __func__, ctx->num_base);
+	CAM_DBG(CAM_ISP, "ctx base num = %d", ctx->num_base);
 
 	return 0;
 }
@@ -474,7 +472,7 @@
 		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_3;
 		break;
 	default:
-		pr_err("%s: invalid resource type\n", __func__);
+		CAM_ERR(CAM_ISP, "invalid resource type");
 		goto err;
 	}
 
@@ -498,15 +496,15 @@
 			&vfe_acquire,
 			sizeof(struct cam_vfe_acquire_args));
 		if (rc) {
-			pr_err("%s: Can not acquire out resource 0x%x\n",
-				__func__, out_port->res_type);
+			CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x",
+				 out_port->res_type);
 			goto err;
 		}
 		break;
 	}
 
 	if (i == in_port->num_out_res) {
-		pr_err("%s: Can not acquire out resource\n", __func__);
+		CAM_ERR(CAM_ISP, "Can not acquire out resource");
 		goto err;
 	}
 
@@ -536,16 +534,16 @@
 		out_port = &in_port->data[i];
 		k = out_port->res_type & 0xFF;
 		if (k >= CAM_IFE_HW_OUT_RES_MAX) {
-			pr_err("%s: invalid output resource type 0x%x\n",
-				__func__, out_port->res_type);
+			CAM_ERR(CAM_ISP, "invalid output resource type 0x%x",
+				 out_port->res_type);
 			continue;
 		}
 
 		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
 			continue;
 
-		CDBG("%s: res_type 0x%x\n",
-			__func__, out_port->res_type);
+		CAM_DBG(CAM_ISP, "res_type 0x%x",
+			 out_port->res_type);
 
 		ife_out_res = &ife_ctx->res_list_ife_out[k];
 		ife_out_res->is_dual_vfe = in_port->usage_type;
@@ -587,15 +585,16 @@
 				&vfe_acquire,
 				sizeof(struct cam_vfe_acquire_args));
 			if (rc) {
-				pr_err("%s:Can not acquire out resource 0x%x\n",
-					__func__, out_port->res_type);
+				CAM_ERR(CAM_ISP,
+					"Can not acquire out resource 0x%x",
+					out_port->res_type);
 				goto err;
 			}
 
 			ife_out_res->hw_res[j] =
 				vfe_acquire.vfe_out.rsrc_node;
-			CDBG("%s: resource type :0x%x res id:0x%x\n",
-				__func__, ife_out_res->hw_res[j]->res_type,
+			CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x",
+				ife_out_res->hw_res[j]->res_type,
 				ife_out_res->hw_res[j]->res_id);
 
 		}
@@ -635,8 +634,7 @@
 				ife_src_res, in_port);
 			break;
 		default:
-			pr_err("%s: Fatal: Unknown IFE SRC resource!\n",
-				__func__);
+			CAM_ERR(CAM_ISP, "Fatal: Unknown IFE SRC resource!");
 			break;
 		}
 		if (rc)
@@ -670,7 +668,7 @@
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
 			&ife_src_res);
 		if (rc) {
-			pr_err("%s: No more free hw mgr resource!\n", __func__);
+			CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
 			goto err;
 		}
 		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
@@ -710,8 +708,7 @@
 			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
 			break;
 		default:
-			pr_err("%s: Wrong IFE CSID Resource Node!\n",
-				__func__);
+			CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node!");
 			goto err;
 		}
 		ife_src_res->res_type = vfe_acquire.rsrc_type;
@@ -736,13 +733,15 @@
 					&vfe_acquire,
 					sizeof(struct cam_vfe_acquire_args));
 			if (rc) {
-				pr_err("%s:Can not acquire IFE HW res %d!\n",
-					__func__, csid_res->res_id);
+				CAM_ERR(CAM_ISP,
+					"Can not acquire IFE HW res %d",
+					csid_res->res_id);
 				goto err;
 			}
 			ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
-			CDBG("%s:acquire success res type :0x%x res id:0x%x\n",
-				__func__, ife_src_res->hw_res[i]->res_type,
+			CAM_DBG(CAM_ISP,
+				"acquire success res type :0x%x res id:0x%x",
+				ife_src_res->hw_res[i]->res_type,
 				ife_src_res->hw_res[i]->res_id);
 
 		}
@@ -779,7 +778,7 @@
 
 	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
 	if (rc) {
-		pr_err("%s: No more free hw mgr resource!\n", __func__);
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
 		goto err;
 	}
 	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
@@ -810,13 +809,12 @@
 	}
 
 	if (i == CAM_IFE_CSID_HW_NUM_MAX) {
-		pr_err("%s: Can not acquire ife csid ipp resrouce!\n",
-			__func__);
+		CAM_ERR(CAM_ISP, "Can not acquire ife csid ipp resource!");
 		goto err;
 	}
 
-	CDBG("%s: acquired csid(%d) left ipp resrouce successfully!\n",
-		__func__, i);
+	CAM_DBG(CAM_ISP, "acquired csid(%d) left ipp resource successfully!",
+		 i);
 
 	csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
 	csid_res->res_id = CAM_IFE_PIX_PATH_RES_IPP;
@@ -841,14 +839,14 @@
 		}
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
-			pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
-				__func__);
+			CAM_ERR(CAM_ISP,
+				"Can not acquire ife csid rdi resource!");
 			goto err;
 		}
 		csid_res->hw_res[1] = csid_acquire.node_res;
 
-		CDBG("%s:acquired csid(%d)right ipp resrouce successfully!\n",
-			__func__, j);
+		CAM_DBG(CAM_ISP,
+			"acquired csid(%d) right ipp resource successfully!", j);
 
 	}
 	csid_res->parent = &ife_ctx->res_list_ife_in;
@@ -881,7 +879,7 @@
 		break;
 	default:
 		path_id = CAM_IFE_PIX_PATH_RES_MAX;
-		CDBG("%s: maximum rdi output type exceeded\n", __func__);
+		CAM_DBG(CAM_ISP, "maximum rdi output type exceeded");
 		break;
 	}
 
@@ -912,7 +910,7 @@
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
 			&csid_res);
 		if (rc) {
-			pr_err("%s: No more free hw mgr resource!\n",
+			CAM_ERR(CAM_ISP, "%s: No more free hw mgr resource!",
 				__func__);
 			goto err;
 		}
@@ -930,6 +928,7 @@
 		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
 		csid_acquire.cid = cid_res_id;
 		csid_acquire.in_port = in_port;
+		csid_acquire.out_port = out_port;
 		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
 
 		for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
@@ -946,8 +945,8 @@
 		}
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
-			pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
-				__func__);
+			CAM_ERR(CAM_ISP,
+				"Can not acquire ife csid rdi resource!");
 			goto err;
 		}
 
@@ -980,7 +979,7 @@
 		ife_ctx->res_list_ife_in.res_id = in_port->res_type;
 		ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
 	} else if (ife_ctx->res_list_ife_in.res_id != in_port->res_type) {
-		pr_err("%s: No Free resource for this context!\n", __func__);
+		CAM_ERR(CAM_ISP, "No Free resource for this context!");
 		goto err;
 	} else {
 		/* else do nothing */
@@ -1034,7 +1033,7 @@
 	/* no dual vfe for TPG */
 	if ((in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) &&
 		(in_port->usage_type != 0)) {
-		pr_err("%s: No Dual VFE on TPG input!\n", __func__);
+		CAM_ERR(CAM_ISP, "No Dual VFE on TPG input!");
 		goto err;
 	}
 
@@ -1042,7 +1041,7 @@
 
 	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
 	if (rc) {
-		pr_err("%s: No more free hw mgr resource!\n", __func__);
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
 		goto err;
 	}
 	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
@@ -1064,8 +1063,7 @@
 	}
 
 	if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
-		pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
-			__func__);
+		CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource!");
 		goto err;
 	}
 
@@ -1095,8 +1093,8 @@
 		}
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
-			pr_err("%s: Can not acquire ife csid rdi resrouce!\n",
-				__func__);
+			CAM_ERR(CAM_ISP,
+				"Can not acquire ife csid rdi resource!");
 			goto err;
 		}
 		cid_res->hw_res[1] = csid_acquire.node_res;
@@ -1112,7 +1110,8 @@
 }
 static int cam_ife_mgr_acquire_hw_for_ctx(
 	struct cam_ife_hw_mgr_ctx          *ife_ctx,
-	struct cam_isp_in_port_info        *in_port)
+	struct cam_isp_in_port_info        *in_port,
+	uint32_t  *num_pix_port, uint32_t  *num_rdi_port)
 {
 	int rc                                    = -1;
 	int is_dual_vfe                           = 0;
@@ -1125,16 +1124,14 @@
 	/* get root node resource */
 	rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
 	if (rc) {
-		pr_err("%s:%d: Can not acquire csid rx resource!\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Can not acquire csid rx resource!");
 		goto err;
 	}
 
 	/* get cid resource */
 	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id);
 	if (rc) {
-		pr_err("%s%d: Acquire IFE CID resource Failed!\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed!");
 		goto err;
 	}
 
@@ -1142,7 +1139,7 @@
 		&pixel_count, &rdi_count);
 
 	if (!pixel_count && !rdi_count) {
-		pr_err("%s: Error! no PIX or RDI resource\n", __func__);
+		CAM_ERR(CAM_ISP, "Error! no PIX or RDI resource");
 		return -EINVAL;
 	}
 
@@ -1151,8 +1148,8 @@
 		rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port,
 				cid_res_id);
 		if (rc) {
-			pr_err("%s%d: Acquire IFE CSID IPP resource Failed!\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ISP,
+				"Acquire IFE CSID IPP resource Failed!");
 			goto err;
 		}
 	}
@@ -1162,8 +1159,8 @@
 		rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port,
 			cid_res_id);
 		if (rc) {
-			pr_err("%s%d: Acquire IFE CSID RDI resource Failed!\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ISP,
+				"Acquire IFE CSID RDI resource Failed!");
 			goto err;
 		}
 	}
@@ -1171,18 +1168,19 @@
 	/* get ife src resource */
 	rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
 	if (rc) {
-		pr_err("%s%d: Acquire IFE SRC resource Failed!\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed!");
 		goto err;
 	}
 
 	rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
 	if (rc) {
-		pr_err("%s%d: Acquire IFE OUT resource Failed!\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed!");
 		goto err;
 	}
 
+	*num_pix_port += pixel_count;
+	*num_rdi_port += rdi_count;
+
 	return 0;
 err:
 	/* release resource at the acquire entry funciton */
@@ -1192,11 +1190,11 @@
 void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
 	enum cam_cdm_cb_status status, uint32_t cookie)
 {
-	CDBG("%s: Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%d\n",
-		__func__, handle, userdata, status, cookie);
+	CAM_DBG(CAM_ISP,
+		"Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%d",
+		 handle, userdata, status, cookie);
 }
 
-
 /* entry function: acquire_hw */
 static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
 					void *acquire_hw_args)
@@ -1208,19 +1206,21 @@
 	struct cam_ife_hw_mgr_ctx         *ife_ctx;
 	struct cam_isp_in_port_info       *in_port = NULL;
 	struct cam_isp_resource           *isp_resource = NULL;
-	struct cam_cdm_acquire_data cdm_acquire;
+	struct cam_cdm_acquire_data        cdm_acquire;
+	uint32_t                           num_pix_port = 0;
+	uint32_t                           num_rdi_port = 0;
 
-	CDBG("%s: Enter...\n", __func__);
+	CAM_DBG(CAM_ISP, "Enter...");
 
 	if (!acquire_args || acquire_args->num_acq <= 0) {
-		pr_err("%s: Nothing to acquire. Seems like error\n", __func__);
+		CAM_ERR(CAM_ISP, "Nothing to acquire. Seems like error");
 		return -EINVAL;
 	}
 
 	/* get the ife ctx */
 	rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
 	if (rc || !ife_ctx) {
-		pr_err("Get ife hw context failed!\n");
+		CAM_ERR(CAM_ISP, "Get ife hw context failed!");
 		goto err;
 	}
 
@@ -1247,12 +1247,12 @@
 	cdm_acquire.id = CAM_CDM_VIRTUAL;
 	cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
 	if (!cam_cdm_acquire(&cdm_acquire)) {
-		CDBG("Successfully acquired the CDM HW hdl=%x\n",
+		CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
 			cdm_acquire.handle);
 		ife_ctx->cdm_handle = cdm_acquire.handle;
 		ife_ctx->cdm_ops = cdm_acquire.ops;
 	} else {
-		pr_err("Failed to acquire the CDM HW\n");
+		CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
 		goto err;
 	}
 
@@ -1263,32 +1263,38 @@
 		if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT)
 			continue;
 
-		CDBG("%s: start copy from user handle %lld with len = %d\n",
-			__func__, isp_resource[i].res_hdl,
+		CAM_DBG(CAM_ISP,
+			"start copy from user handle %lld with len = %d",
+			isp_resource[i].res_hdl,
 			isp_resource[i].length);
 
 		in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
 			isp_resource[i].length);
 		if (in_port > 0) {
-			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port);
+			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
+				&num_pix_port, &num_rdi_port);
 			kfree(in_port);
 			if (rc) {
-				pr_err("%s: can not acquire resource!\n",
-					__func__);
+				CAM_ERR(CAM_ISP, "can not acquire resource");
 				goto free_res;
 			}
 		} else {
-			pr_err("%s: copy from user failed with in_port = %pK",
-				__func__, in_port);
+			CAM_ERR(CAM_ISP,
+				"copy from user failed with in_port = %pK",
+				in_port);
 			rc = -EFAULT;
 			goto free_res;
 		}
 	}
+
+	/* Check whether context has only RDI resource */
+	if (!num_pix_port)
+		ife_ctx->is_rdi_only_context = 1;
+
 	/* Process base info */
 	rc = cam_ife_mgr_process_base_info(ife_ctx);
 	if (rc) {
-		pr_err("%s: Error process) base info!\n",
-			__func__);
+		CAM_ERR(CAM_ISP, "Error processing base info!");
 		return -EINVAL;
 	}
 
@@ -1297,14 +1303,14 @@
 
 	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
 
-	CDBG("%s: Exit...(success)!\n", __func__);
+	CAM_DBG(CAM_ISP, "Exit...(success)!");
 
 	return 0;
 free_res:
 	cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
 	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
 err:
-	CDBG("%s: Exit...(rc=%d)!\n", __func__, rc);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)!", rc);
 	return rc;
 }
 
@@ -1318,25 +1324,25 @@
 	struct cam_cdm_bl_request *cdm_cmd;
 	struct cam_ife_hw_mgr_ctx *ctx;
 
-	CDBG("%s: Enter\n", __func__);
+	CAM_DBG(CAM_ISP, "Enter");
 	if (!hw_mgr_priv || !config_hw_args) {
-		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	cfg = config_hw_args;
 	ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
 	if (!ctx) {
-		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
 		return -EPERM;
 	}
 
 	if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
-		pr_err("%s: Invalid context parameters !\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid context parameters!");
 		return -EPERM;
 	}
 
-	CDBG("%s:%d Enter ctx id:%d\n", __func__, __LINE__, ctx->ctx_index);
+	CAM_DBG(CAM_ISP, "Enter ctx id:%d", ctx->ctx_index);
 
 	if (cfg->num_hw_update_entries > 0) {
 		cdm_cmd = ctx->cdm_cmd;
@@ -1355,11 +1361,11 @@
 
 		rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd);
 		if (rc)
-			pr_err("Failed to apply the configs\n");
+			CAM_ERR(CAM_ISP, "Failed to apply the configs");
 	} else {
-		pr_err("No commands to config\n");
+		CAM_ERR(CAM_ISP, "No commands to config");
 	}
-	CDBG("%s: Exit\n", __func__);
+	CAM_DBG(CAM_ISP, "Exit");
 
 	return rc;
 }
@@ -1374,22 +1380,21 @@
 	uint32_t                          i, master_base_idx = 0;
 
 	if (!hw_mgr_priv || !stop_hw_args) {
-		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
 		return -EPERM;
 	}
 
-	CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
 		ctx->ctx_index);
 
 	/* stop resource will remove the irq mask from the hardware */
 	if (!ctx->num_base) {
-		pr_err("%s%d: error number of bases are zero\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Error: number of bases is zero");
 		return -EINVAL;
 	}
 
@@ -1445,7 +1450,7 @@
 
 	/* update vote bandwidth should be done at the HW layer */
 
-	CDBG("%s%d Exit...ctx id:%d rc :%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
 		ctx->ctx_index, rc);
 
 	return rc;
@@ -1461,23 +1466,22 @@
 	uint32_t                          i, master_base_idx = 0;
 
 	if (!hw_mgr_priv || !stop_hw_args) {
-		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
 		return -EPERM;
 	}
 
-	CDBG("%s%d: Enter...ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
 		ctx->ctx_index);
 
 	/* Note:stop resource will remove the irq mask from the hardware */
 
 	if (!ctx->num_base) {
-		pr_err("%s%d: error number of bases are zero\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Error: number of bases is zero");
 		return -EINVAL;
 	}
 
@@ -1522,8 +1526,8 @@
 	}
 
 	if (cam_cdm_stream_off(ctx->cdm_handle))
-		pr_err("%s%d: CDM stream off failed %d\n",
-			__func__, __LINE__, ctx->cdm_handle);
+		CAM_ERR(CAM_ISP, "CDM stream off failed %d",
+			ctx->cdm_handle);
 
 	/* IFE mux in resources */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
@@ -1559,8 +1563,7 @@
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
 		cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
 
-	CDBG("%s%d Exit...ctx id:%d rc :%d\n", __func__, __LINE__,
-		ctx->ctx_index, rc);
+	CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d", ctx->ctx_index, rc);
 
 	return rc;
 }
@@ -1574,7 +1577,7 @@
 	struct cam_csid_reset_cfg_args  csid_reset_args;
 
 	if (!hw_mgr) {
-		CDBG("%s: Invalid arguments\n", __func__);
+		CAM_DBG(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
@@ -1596,13 +1599,13 @@
 	for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
 		if (hw_idx != hw_mgr->ife_devices[i]->hw_idx)
 			continue;
-		CDBG("%d:VFE (id = %d) reset\n", __LINE__, hw_idx);
+		CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
 		vfe_hw_intf = hw_mgr->ife_devices[i];
 		vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv, NULL, 0);
 		break;
 	}
 
-	CDBG("%d: Exit Successfully\n", __LINE__);
+	CAM_DBG(CAM_ISP, "Exit Successfully");
 	return 0;
 }
 
@@ -1616,74 +1619,68 @@
 	uint32_t                          i;
 
 	if (!hw_mgr_priv || !start_hw_args) {
-		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		pr_err("%s: Invalid context is used!\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid context is used!");
 		return -EPERM;
 	}
 
-	CDBG("%s%d Enter... ctx id:%d\n", __func__, __LINE__,
-		ctx->ctx_index);
+	CAM_DBG(CAM_ISP, "Enter... ctx id:%d", ctx->ctx_index);
 
-	CDBG("%s%d START IFE OUT ... in ctx id:%d\n", __func__, __LINE__,
-		ctx->ctx_index);
+	CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);
 	/* start the IFE out devices */
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			pr_err("%s: Can not start IFE OUT (%d)!\n",
-				__func__, i);
+			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!", i);
 			goto err;
 		}
 	}
 
-	CDBG("%s%d START IFE SRC ... in ctx id:%d\n", __func__, __LINE__,
-		ctx->ctx_index);
+	CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d", ctx->ctx_index);
 	/* Start the IFE mux in devices */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not start IFE MUX (%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
-	CDBG("%s:%d: START CSID HW ... in ctx id:%d\n", __func__, __LINE__,
-		ctx->ctx_index);
+	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
 	/* Start the IFE CSID HW devices */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not start IFE CSID (%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
-	CDBG("%s%d START CID SRC ... in ctx id:%d\n", __func__, __LINE__,
-		ctx->ctx_index);
+	CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
 	/* Start the IFE CID HW devices */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not start IFE CSID (%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
 	/* Start IFE root node: do nothing */
-	CDBG("%s: Exit...(success)\n", __func__);
+	CAM_DBG(CAM_ISP, "Exit...(success)");
 	return 0;
 
 err:
 	cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
-	CDBG("%s: Exit...(rc=%d)\n", __func__, rc);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
 	return rc;
 }
 
@@ -1696,17 +1693,17 @@
 	uint32_t                          i;
 
 	if (!hw_mgr_priv || !start_hw_args) {
-		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		pr_err("%s: Invalid context is used!\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid context is used!");
 		return -EPERM;
 	}
 
-	CDBG("%s%d Enter... ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "Enter... ctx id:%d",
 		ctx->ctx_index);
 
 	/* update Bandwidth should be done at the hw layer */
@@ -1715,127 +1712,127 @@
 
 	/* INIT IFE Root: do nothing */
 
-	CDBG("%s%d INIT IFE CID ... in ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
 		ctx->ctx_index);
 	/* INIT IFE CID */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not INIT IFE CID.(id :%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not INIT IFE CID.(id :%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
 
-	CDBG("%s%d INIT IFE csid ... in ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "INIT IFE csid ... in ctx id:%d",
 		ctx->ctx_index);
 
 	/* INIT IFE csid */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not INIT IFE CSID.(id :%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not INIT IFE CSID.(id :%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
 	/* INIT IFE SRC */
-	CDBG("%s%d INIT IFE SRC in ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%d",
 		ctx->ctx_index);
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not INIT IFE SRC (%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
 	/* INIT IFE OUT */
-	CDBG("%s%d INIT IFE OUT RESOURCES in ctx id:%d\n", __func__,
-		__LINE__, ctx->ctx_index);
+	CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%d",
+		ctx->ctx_index);
 
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			pr_err("%s: Can not INIT IFE OUT (%d)!\n",
-				__func__, ctx->res_list_ife_out[i].res_id);
+			CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)!",
+				 ctx->res_list_ife_out[i].res_id);
 			goto err;
 		}
 	}
 
-	CDBG("%s: start cdm interface\n", __func__);
+	CAM_DBG(CAM_ISP, "start cdm interface");
 	rc = cam_cdm_stream_on(ctx->cdm_handle);
 	if (rc) {
-		pr_err("%s: Can not start cdm (%d)!\n",
-			__func__, ctx->cdm_handle);
+		CAM_ERR(CAM_ISP, "Can not start cdm (%d)!",
+			 ctx->cdm_handle);
 		goto err;
 	}
 
 	/* Apply initial configuration */
-	CDBG("%s: Config HW\n", __func__);
+	CAM_DBG(CAM_ISP, "Config HW");
 	rc = cam_ife_mgr_config_hw(hw_mgr_priv, start_hw_args);
 	if (rc) {
-		pr_err("%s: Config HW failed\n", __func__);
+		CAM_ERR(CAM_ISP, "Config HW failed");
 		goto err;
 	}
 
-	CDBG("%s%d START IFE OUT ... in ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d",
 		ctx->ctx_index);
 	/* start the IFE out devices */
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			pr_err("%s: Can not start IFE OUT (%d)!\n",
-				__func__, i);
+			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!",
+				 i);
 			goto err;
 		}
 	}
 
-	CDBG("%s%d START IFE SRC ... in ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d",
 		ctx->ctx_index);
 	/* Start the IFE mux in devices */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not start IFE MUX (%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
-	CDBG("%s:%d: START CSID HW ... in ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d",
 		ctx->ctx_index);
 	/* Start the IFE CSID HW devices */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not start IFE CSID (%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
-	CDBG("%s%d START CID SRC ... in ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d",
 		ctx->ctx_index);
 	/* Start the IFE CID HW devices */
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			pr_err("%s: Can not start IFE CSID (%d)!\n",
-				__func__, hw_mgr_res->res_id);
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+				 hw_mgr_res->res_id);
 			goto err;
 		}
 	}
 
 	/* Start IFE root node: do nothing */
-	CDBG("%s: Exit...(success)\n", __func__);
+	CAM_DBG(CAM_ISP, "Exit...(success)");
 	return 0;
 err:
 	cam_ife_mgr_stop_hw(hw_mgr_priv, start_hw_args);
-	CDBG("%s: Exit...(rc=%d)\n", __func__, rc);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
 	return rc;
 }
 
@@ -1858,17 +1855,17 @@
 	struct cam_ife_hw_mgr_ctx        *ctx;
 
 	if (!hw_mgr_priv || !release_hw_args) {
-		pr_err("%s%d: Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		pr_err("%s: Fatal: Invalid context is used!\n", __func__);
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
 		return -EPERM;
 	}
 
-	CDBG("%s%d Enter...ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
 		ctx->ctx_index);
 
 	/* we should called the stop hw before this already */
@@ -1884,7 +1881,7 @@
 	/* clean context */
 	list_del_init(&ctx->list);
 	ctx->ctx_in_use = 0;
-	CDBG("%s%d Exit...ctx id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
 		ctx->ctx_index);
 	cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
 	return rc;
@@ -1898,34 +1895,34 @@
 		(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
 	struct cam_ife_hw_mgr_ctx        *ctx;
 	struct cam_ife_hw_mgr            *hw_mgr;
-	struct cam_isp_kmd_buf_info       kmd_buf;
+	struct cam_kmd_buf_info           kmd_buf;
 	uint32_t                          i;
 	bool                              fill_fence = true;
 
 	if (!hw_mgr_priv || !prepare_hw_update_args) {
-		pr_err("%s: Invalid args\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid args");
 		return -EINVAL;
 	}
 
-	CDBG("%s:%d enter\n", __func__, __LINE__);
+	CAM_DBG(CAM_ISP, "enter");
 
 	ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
 	hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;
 
-	rc = cam_isp_validate_packet(prepare->packet);
+	rc = cam_packet_util_validate_packet(prepare->packet);
 	if (rc)
 		return rc;
 
-	CDBG("%s:%d enter\n", __func__, __LINE__);
+	CAM_DBG(CAM_ISP, "enter");
 	/* Pre parse the packet*/
-	rc = cam_isp_get_kmd_buffer(prepare->packet, &kmd_buf);
+	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
 	if (rc)
 		return rc;
 
 	rc = cam_packet_util_process_patches(prepare->packet,
 		hw_mgr->mgr_common.cmd_iommu_hdl);
 	if (rc) {
-		pr_err("%s: Patch ISP packet failed.\n", __func__);
+		CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
 		return rc;
 	}
 
@@ -1934,7 +1931,7 @@
 	prepare->num_out_map_entries = 0;
 
 	for (i = 0; i < ctx->num_base; i++) {
-		CDBG("%s: process cmd buffer for device %d\n", __func__, i);
+		CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i);
 
 		/* Add change base */
 		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
@@ -1982,6 +1979,41 @@
 	return rc;
 }
 
+static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_isp_hw_cmd_args  *hw_cmd_args  = cmd_args;
+	struct cam_ife_hw_mgr_ctx   *ctx;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		return -EPERM;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
+		if (ctx->is_rdi_only_context)
+			hw_cmd_args->u.is_rdi_only_context = 1;
+		else
+			hw_cmd_args->u.is_rdi_only_context = 0;
+
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Invalid HW mgr command: 0x%x",
+			hw_cmd_args->cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
 static int cam_ife_mgr_cmd_get_sof_timestamp(
 	struct cam_ife_hw_mgr_ctx      *ife_ctx,
 	uint64_t                       *time_stamp)
@@ -2028,7 +2060,7 @@
 	}
 end:
 	if (rc)
-		pr_err("%s:error in getting sof time stamp\n", __func__);
+		CAM_ERR(CAM_ISP, "error in getting sof time stamp");
 
 	return rc;
 }
@@ -2046,13 +2078,14 @@
 	struct cam_ife_hw_mgr_ctx        *ctx = NULL;
 
 	/* Here recovery is performed */
-	CDBG("%s:Enter: ErrorType = %d\n", __func__, error_type);
+	CAM_DBG(CAM_ISP, "Enter: ErrorType = %d", error_type);
 
 	switch (error_type) {
 	case CAM_ISP_HW_ERROR_OVERFLOW:
 	case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
 		if (!recovery_data->affected_ctx[0]) {
-			pr_err("No context is affected but recovery called\n");
+			CAM_ERR(CAM_ISP,
+				"No context is affected but recovery called");
 			kfree(recovery_data);
 			return 0;
 		}
@@ -2080,9 +2113,9 @@
 		break;
 
 	default:
-		pr_err("%s: Invalid Error\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid Error");
 	}
-	CDBG("%s:Exit: ErrorType = %d\n", __func__, error_type);
+	CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", error_type);
 
 	kfree(recovery_data);
 	return rc;
@@ -2105,12 +2138,11 @@
 	memcpy(recovery_data, ife_mgr_recovery_data,
 			sizeof(struct cam_hw_event_recovery_data));
 
-	CDBG("%s: Enter: error_type (%d)\n", __func__,
-		recovery_data->error_type);
+	CAM_DBG(CAM_ISP, "Enter: error_type (%d)", recovery_data->error_type);
 
 	task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
 	if (!task) {
-		pr_err("%s: No empty task frame\n", __func__);
+		CAM_ERR(CAM_ISP, "No empty task frame");
 		kfree(recovery_data);
 		return -ENOMEM;
 	}
@@ -2139,7 +2171,7 @@
 	uint32_t max_idx =  ife_hwr_mgr_ctx->num_base;
 	uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
 
-	CDBG("%s:Enter:max_idx = %d\n", __func__, max_idx);
+	CAM_DBG(CAM_ISP, "Enter:max_idx = %d", max_idx);
 
 	while (i < max_idx) {
 		if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
@@ -2160,7 +2192,7 @@
 			j = j - 1;
 		}
 	}
-	CDBG("%s:Exit\n", __func__);
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -2186,11 +2218,11 @@
 	struct cam_hw_stop_args          stop_args;
 	uint32_t i = 0;
 
-	CDBG("%s:Enter\n", __func__);
+	CAM_DBG(CAM_ISP, "Enter");
 	return 0;
 
 	if (!recovery_data) {
-		pr_err("%s: recovery_data parameter is NULL\n",
-			__func__);
+		CAM_ERR(CAM_ISP,
+			"recovery_data parameter is NULL");
 		return -EINVAL;
 	}
@@ -2207,7 +2239,7 @@
 		 * Check if current core_idx matches the HW associated
 		 * with this context
 		 */
-		CDBG("%s:Calling match Hw idx\n", __func__);
+		CAM_DBG(CAM_ISP, "Calling match Hw idx");
 		if (cam_ife_hw_mgr_match_hw_idx(ife_hwr_mgr_ctx, affected_core))
 			continue;
 
@@ -2217,7 +2249,7 @@
 		stop_args.ctxt_to_hw_map = ife_hwr_mgr_ctx;
 
 		/* Add affected_context in list of recovery data*/
-		CDBG("%s:Add new entry in affected_ctx_list\n", __func__);
+		CAM_DBG(CAM_ISP, "Add new entry in affected_ctx_list");
 		if (recovery_data->no_of_context < CAM_CTX_MAX)
 			recovery_data->affected_ctx[
 				recovery_data->no_of_context++] =
@@ -2230,7 +2262,7 @@
 		 */
 		if (!cam_ife_mgr_stop_hw_in_overflow(&hw_mgr_priv,
 			&stop_args)) {
-			CDBG("%s:Calling Error handler CB\n", __func__);
+			CAM_DBG(CAM_ISP, "Calling Error handler CB");
 			ife_hwr_irq_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
 				CAM_ISP_HW_EVENT_ERROR, error_event_data);
 		}
@@ -2238,10 +2270,10 @@
 	/* fill the affected_core in recovery data */
 	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
 		recovery_data->affected_core[i] = affected_core[i];
-		CDBG("%s: Vfe core %d is affected (%d)\n",
-			__func__, i, recovery_data->affected_core[i]);
+		CAM_DBG(CAM_ISP, "Vfe core %d is affected (%d)",
+			 i, recovery_data->affected_core[i]);
 	}
-	CDBG("%s:Exit\n", __func__);
+	CAM_DBG(CAM_ISP, "Exit");
 	return 0;
 }
 
@@ -2261,7 +2293,7 @@
 	core_idx = evt_payload->core_index;
 
 	rc = evt_payload->error_type;
-	CDBG("%s: Enter: error_type (%d)\n", __func__, evt_payload->error_type);
+	CAM_DBG(CAM_ISP, "Enter: error_type (%d)", evt_payload->error_type);
 	switch (evt_payload->error_type) {
 	case CAM_ISP_HW_ERROR_OVERFLOW:
 	case CAM_ISP_HW_ERROR_P2I_ERROR:
@@ -2280,11 +2312,11 @@
 		cam_ife_hw_mgr_do_error_recovery(&recovery_data);
 		break;
 	default:
-		CDBG("%s: None error. Error type (%d)\n", __func__,
+		CAM_DBG(CAM_ISP, "None error. Error type (%d)",
 			evt_payload->error_type);
 	}
 
-	CDBG("%s: Exit (%d)\n", __func__, rc);
+	CAM_DBG(CAM_ISP, "Exit (%d)", rc);
 	return rc;
 }
 
@@ -2294,26 +2326,26 @@
  * of dual VFE.
  * RDI path does not support DUAl VFE
  */
-static int cam_ife_hw_mgr_handle_rup_for_camif_hw_res(
+static int cam_ife_hw_mgr_handle_reg_update(
 	void                              *handler_priv,
 	void                              *payload)
 {
 	struct cam_isp_resource_node            *hw_res;
 	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
 	struct cam_vfe_top_irq_evt_payload      *evt_payload;
-	struct cam_ife_hw_mgr_res               *isp_ife_camif_res = NULL;
+	struct cam_ife_hw_mgr_res               *ife_src_res = NULL;
 	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
 	struct cam_isp_hw_reg_update_event_data  rup_event_data;
 	uint32_t  core_idx;
 	uint32_t  rup_status = -EINVAL;
 
-	CDBG("%s: Enter\n", __func__);
+	CAM_DBG(CAM_ISP, "Enter");
 
 	ife_hwr_mgr_ctx = handler_priv;
 	evt_payload = payload;
 
 	if (!handler_priv || !payload) {
-		pr_err("%s: Invalid Parameter\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid Parameter");
 		return -EPERM;
 	}
 
@@ -2322,65 +2354,82 @@
 		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
 
 	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
-	list_for_each_entry(isp_ife_camif_res,
+	list_for_each_entry(ife_src_res,
 			&ife_hwr_mgr_ctx->res_list_ife_src, list) {
 
-		if (isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
 			continue;
 
-		CDBG("%s: camif resource id = %d, curr_core_idx = %d\n",
-			__func__, isp_ife_camif_res->res_id, core_idx);
-		switch (isp_ife_camif_res->res_id) {
+		CAM_DBG(CAM_ISP, "resource id = %d, curr_core_idx = %d",
+			 ife_src_res->res_id, core_idx);
+		switch (ife_src_res->res_id) {
 		case CAM_ISP_HW_VFE_IN_CAMIF:
-			if (isp_ife_camif_res->is_dual_vfe)
+			if (ife_src_res->is_dual_vfe)
 				/* It checks for slave core RUP ACK*/
-				hw_res = isp_ife_camif_res->hw_res[1];
+				hw_res = ife_src_res->hw_res[1];
 			else
-				hw_res = isp_ife_camif_res->hw_res[0];
+				hw_res = ife_src_res->hw_res[0];
 
 			if (!hw_res) {
-				pr_err("%s: CAMIF device is NULL\n", __func__);
+				CAM_ERR(CAM_ISP, "CAMIF device is NULL");
 				break;
 			}
-			CDBG("%s: current_core_id = %d , core_idx res = %d\n",
-					__func__, core_idx,
-					hw_res->hw_intf->hw_idx);
+			CAM_DBG(CAM_ISP,
+				"current_core_id = %d , core_idx res = %d",
+				 core_idx, hw_res->hw_intf->hw_idx);
 
 			if (core_idx == hw_res->hw_intf->hw_idx) {
 				rup_status = hw_res->bottom_half_handler(
 					hw_res, evt_payload);
 			}
+
+			if (!rup_status) {
+				ife_hwr_irq_rup_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_REG_UPDATE,
+					&rup_event_data);
+			}
 			break;
 
 		case CAM_ISP_HW_VFE_IN_RDI0:
 		case CAM_ISP_HW_VFE_IN_RDI1:
 		case CAM_ISP_HW_VFE_IN_RDI2:
-			hw_res = isp_ife_camif_res->hw_res[0];
+		case CAM_ISP_HW_VFE_IN_RDI3:
+			if (!ife_hwr_mgr_ctx->is_rdi_only_context)
+				continue;
+
+			/*
+			 * This is RDI only context, send Reg update and epoch
+			 * HW event to cam context
+			 */
+			hw_res = ife_src_res->hw_res[0];
 
 			if (!hw_res) {
-				pr_err("%s: RDI Device is NULL\n", __func__);
+				CAM_ERR(CAM_ISP, "RDI Device is NULL");
 				break;
 			}
+
 			if (core_idx == hw_res->hw_intf->hw_idx)
-				/* Need to process rdi reg update */
-				rup_status = -EINVAL;
+				rup_status = hw_res->bottom_half_handler(
+					hw_res, evt_payload);
+
+			if (!rup_status) {
+				/* Send the Reg update hw event */
+				ife_hwr_irq_rup_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_REG_UPDATE,
+					&rup_event_data);
+			}
 			break;
 		default:
-			pr_err("%s: invalid resource id (%d)", __func__,
-				isp_ife_camif_res->res_id);
-		}
-
-		/* only do callback for pixel reg update for now */
-		if (!rup_status && (isp_ife_camif_res->res_id ==
-			CAM_ISP_HW_VFE_IN_CAMIF)) {
-			ife_hwr_irq_rup_cb(ife_hwr_mgr_ctx->common.cb_priv,
-				CAM_ISP_HW_EVENT_REG_UPDATE, &rup_event_data);
+			CAM_ERR(CAM_ISP, "Invalid resource id (%d)",
+				ife_src_res->res_id);
 		}
 
 	}
 
 	if (!rup_status)
-		CDBG("%s: Exit rup_status = %d\n", __func__, rup_status);
+		CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
 
 	return 0;
 }
@@ -2406,13 +2455,13 @@
 	if ((epoch_cnt[core_idx0] - epoch_cnt[core_idx1] > 1) ||
 		(epoch_cnt[core_idx1] - epoch_cnt[core_idx0] > 1)) {
 
-		pr_warn("%s:One of the VFE of dual VFE cound not generate error\n",
-			__func__);
+		CAM_WARN(CAM_ISP,
+			"One VFE of dual VFE could not generate EPOCH");
 		rc = -1;
 		return rc;
 	}
 
-	CDBG("Only one core_index has given EPOCH\n");
+	CAM_DBG(CAM_ISP, "Only one core_index has given EPOCH");
 
 	return rc;
 }
@@ -2434,7 +2483,7 @@
 	uint32_t  core_index0;
 	uint32_t  core_index1;
 
-	CDBG("%s:Enter\n", __func__);
+	CAM_DBG(CAM_ISP, "Enter");
 
 	ife_hwr_mgr_ctx = handler_priv;
 	evt_payload = payload;
@@ -2459,8 +2508,7 @@
 		case 0:
 			/* EPOCH check for Left side VFE */
 			if (!hw_res_l) {
-				pr_err("%s: Left Device is NULL\n",
-					__func__);
+				CAM_ERR(CAM_ISP, "Left Device is NULL");
 				break;
 			}
 
@@ -2481,8 +2529,7 @@
 			/* SOF check for Left side VFE (Master)*/
 
 			if ((!hw_res_l) || (!hw_res_r)) {
-				pr_err("%s: Dual VFE Device is NULL\n",
-					__func__);
+				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
 				break;
 			}
 			if (core_idx == hw_res_l->hw_intf->hw_idx) {
@@ -2520,13 +2567,13 @@
 
 		/* Error */
 		default:
-			pr_err("%s: error with hw_res\n", __func__);
+			CAM_ERR(CAM_ISP, "error with hw_res");
 
 		}
 	}
 
 	if (!epoch_status)
-		CDBG("%s: Exit epoch_status = %d\n", __func__, epoch_status);
+		CAM_DBG(CAM_ISP, "Exit epoch_status = %d", epoch_status);
 
 	return 0;
 }
@@ -2552,134 +2599,56 @@
 	if ((sof_cnt[core_idx0] - sof_cnt[core_idx1] > 1) ||
 		(sof_cnt[core_idx1] - sof_cnt[core_idx0] > 1)) {
 
-		pr_err("%s: One VFE of dual VFE cound not generate SOF\n",
-					__func__);
+		CAM_ERR(CAM_ISP, "One VFE of dual VFE could not generate SOF");
 		rc = -1;
 		return rc;
 	}
 
-	pr_info("Only one core_index has given SOF\n");
+	CAM_INFO(CAM_ISP, "Only one core_index has given SOF");
 
 	return rc;
 }
 
-static int cam_ife_hw_mgr_handle_sof_for_camif_hw_res(
-	void                              *handler_priv,
-	void                              *payload)
+static int cam_ife_hw_mgr_process_camif_sof(
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res,
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx,
+	struct cam_vfe_top_irq_evt_payload   *evt_payload)
 {
-	int32_t rc = -1;
+	struct cam_isp_hw_sof_event_data      sof_done_event_data;
+	cam_hw_event_cb_func                  ife_hwr_irq_sof_cb;
 	struct cam_isp_resource_node         *hw_res_l = NULL;
 	struct cam_isp_resource_node         *hw_res_r = NULL;
-	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
-	struct cam_vfe_top_irq_evt_payload   *evt_payload;
-	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
-	cam_hw_event_cb_func                  ife_hwr_irq_sof_cb;
-	struct cam_isp_hw_sof_event_data      sof_done_event_data;
+	int32_t rc = -EINVAL;
 	uint32_t  core_idx;
 	uint32_t  sof_status = 0;
 	uint32_t  core_index0;
 	uint32_t  core_index1;
 
-	CDBG("%s:Enter\n", __func__);
-
-	ife_hwr_mgr_ctx = handler_priv;
-	evt_payload = payload;
-	if (!evt_payload) {
-		pr_err("%s: no payload\n", __func__);
-		return IRQ_HANDLED;
-	}
+	CAM_DBG(CAM_ISP, "Enter");
 	core_idx = evt_payload->core_index;
+	hw_res_l = isp_ife_camif_res->hw_res[0];
+	hw_res_r = isp_ife_camif_res->hw_res[1];
+	CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
+		isp_ife_camif_res->is_dual_vfe);
+
 	ife_hwr_irq_sof_cb =
 		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
 
-	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
-
-	list_for_each_entry(isp_ife_camif_res,
-		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
-
-		if ((isp_ife_camif_res->res_type ==
-			CAM_IFE_HW_MGR_RES_UNINIT) ||
-			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
-			continue;
-
-		hw_res_l = isp_ife_camif_res->hw_res[0];
-		hw_res_r = isp_ife_camif_res->hw_res[1];
-
-		CDBG("%s:is_dual_vfe ? = %d\n", __func__,
-			isp_ife_camif_res->is_dual_vfe);
-		switch (isp_ife_camif_res->is_dual_vfe) {
-		/* Handling Single VFE Scenario */
-		case 0:
-			/* SOF check for Left side VFE */
-			if (!hw_res_l) {
-				pr_err("%s: VFE Device is NULL\n",
-					__func__);
-				break;
-			}
-			CDBG("%s: curr_core_idx = %d, core idx hw = %d\n",
-					__func__, core_idx,
-					hw_res_l->hw_intf->hw_idx);
-
-			if (core_idx == hw_res_l->hw_intf->hw_idx) {
-				sof_status = hw_res_l->bottom_half_handler(
-					hw_res_l, evt_payload);
-				if (!sof_status) {
-					cam_ife_mgr_cmd_get_sof_timestamp(
-						ife_hwr_mgr_ctx,
-						&sof_done_event_data.timestamp);
-
-					ife_hwr_irq_sof_cb(
-						ife_hwr_mgr_ctx->common.cb_priv,
-						CAM_ISP_HW_EVENT_SOF,
-						&sof_done_event_data);
-				}
-			}
-
+	switch (isp_ife_camif_res->is_dual_vfe) {
+	/* Handling Single VFE Scenario */
+	case 0:
+		/* SOF check for Left side VFE */
+		if (!hw_res_l) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
 			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d, core idx hw = %d",
+			core_idx, hw_res_l->hw_intf->hw_idx);
 
-		/* Handling Dual VFE Scenario */
-		case 1:
-			/* SOF check for Left side VFE */
-
-			if (!hw_res_l) {
-				pr_err("%s: VFE Device is NULL\n",
-					__func__);
-				break;
-			}
-			CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
-					__func__, core_idx,
-					hw_res_l->hw_intf->hw_idx);
-
-			if (core_idx == hw_res_l->hw_intf->hw_idx) {
-				sof_status = hw_res_l->bottom_half_handler(
-					hw_res_l, evt_payload);
-				if (!sof_status)
-					ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
-			}
-
-			/* SOF check for Right side VFE */
-			if (!hw_res_r) {
-				pr_err("%s: VFE Device is NULL\n",
-					__func__);
-				break;
-			}
-			CDBG("%s: curr_core_idx = %d, idx associated hw = %d\n",
-					__func__, core_idx,
-					hw_res_r->hw_intf->hw_idx);
-			if (core_idx == hw_res_r->hw_intf->hw_idx) {
-				sof_status = hw_res_r->bottom_half_handler(
-					hw_res_r, evt_payload);
-				if (!sof_status)
-					ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
-			}
-
-			core_index0 = hw_res_l->hw_intf->hw_idx;
-			core_index1 = hw_res_r->hw_intf->hw_idx;
-
-			rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(
-				ife_hwr_mgr_ctx, core_index0, core_index1);
-
-			if (!rc) {
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			sof_status = hw_res_l->bottom_half_handler(hw_res_l,
+				evt_payload);
+			if (!sof_status) {
 				cam_ife_mgr_cmd_get_sof_timestamp(
 					ife_hwr_mgr_ctx,
 					&sof_done_event_data.timestamp);
@@ -2689,14 +2658,138 @@
 					CAM_ISP_HW_EVENT_SOF,
 					&sof_done_event_data);
 			}
+		}
+
+		break;
+
+	/* Handling Dual VFE Scenario */
+	case 1:
+		/* SOF check for Left side VFE */
+
+		if (!hw_res_l) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
+			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx = %d",
+				 core_idx,
+				hw_res_l->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			sof_status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+			if (!sof_status)
+				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+		}
+
+		/* SOF check for Right side VFE */
+		if (!hw_res_r) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
+			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx = %d",
+				 core_idx,
+				hw_res_r->hw_intf->hw_idx);
+		if (core_idx == hw_res_r->hw_intf->hw_idx) {
+			sof_status = hw_res_r->bottom_half_handler(hw_res_r,
+				evt_payload);
+			if (!sof_status)
+				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+		}
+
+		core_index0 = hw_res_l->hw_intf->hw_idx;
+		core_index1 = hw_res_r->hw_intf->hw_idx;
+
+		rc = cam_ife_hw_mgr_check_sof_for_dual_vfe(ife_hwr_mgr_ctx,
+			core_index0, core_index1);
+
+		if (!rc)
+			ife_hwr_irq_sof_cb(ife_hwr_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_SOF, &sof_done_event_data);
+
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "error with hw_res");
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)!", sof_status);
+
+	return 0;
+}
+
+static int cam_ife_hw_mgr_handle_sof(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	int32_t rc = -EINVAL;
+	struct cam_isp_resource_node         *hw_res = NULL;
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *ife_src_res = NULL;
+	cam_hw_event_cb_func                  ife_hw_irq_sof_cb;
+	struct cam_isp_hw_sof_event_data      sof_done_event_data;
+	uint32_t  sof_status = 0;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hw_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	if (!evt_payload) {
+		CAM_ERR(CAM_ISP, "no payload");
+		return IRQ_HANDLED;
+	}
+	ife_hw_irq_sof_cb =
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
+
+	list_for_each_entry(ife_src_res,
+		&ife_hw_mgr_ctx->res_list_ife_src, list) {
+
+		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		switch (ife_src_res->res_id) {
+		case CAM_ISP_HW_VFE_IN_RDI0:
+		case CAM_ISP_HW_VFE_IN_RDI1:
+		case CAM_ISP_HW_VFE_IN_RDI2:
+		case CAM_ISP_HW_VFE_IN_RDI3:
+			/* check if it is rdi only context */
+			if (ife_hw_mgr_ctx->is_rdi_only_context) {
+				hw_res = ife_src_res->hw_res[0];
+				sof_status = hw_res->bottom_half_handler(
+					hw_res, evt_payload);
+
+				if (!sof_status) {
+					cam_ife_mgr_cmd_get_sof_timestamp(
+						ife_hw_mgr_ctx,
+						&sof_done_event_data.timestamp);
+
+					ife_hw_irq_sof_cb(
+						ife_hw_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_SOF,
+						&sof_done_event_data);
+				}
+
+				CAM_DBG(CAM_ISP, "sof_status = %d", sof_status);
+
+				/* this is RDI only context so exit from here */
+				return 0;
+			}
 			break;
 
+		case CAM_ISP_HW_VFE_IN_CAMIF:
+			rc = cam_ife_hw_mgr_process_camif_sof(ife_src_res,
+				ife_hw_mgr_ctx, evt_payload);
+			break;
 		default:
-			pr_err("%s: error with hw_res\n", __func__);
+			CAM_ERR(CAM_ISP, "Invalid resource id: %d",
+				ife_src_res->res_id);
+			break;
 		}
 	}
 
-	CDBG("%s: Exit (sof_status = %d)!\n", __func__, sof_status);
 	return 0;
 }
 
@@ -2719,7 +2812,7 @@
 	uint32_t  error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
 	uint32_t  num_of_error_handles = 0;
 
-	CDBG("%s:Enter\n", __func__);
+	CAM_DBG(CAM_ISP, "Enter");
 
 	ife_hwr_mgr_ctx = evt_payload->ctx;
 	ife_hwr_irq_wm_done_cb =
@@ -2782,7 +2875,7 @@
 			/* Report for Successful buf_done event if any */
 			if (buf_done_event_data.num_handles > 0 &&
 				ife_hwr_irq_wm_done_cb) {
-				CDBG("%s: notify isp context\n", __func__);
+				CAM_DBG(CAM_ISP, "notify isp context");
 				ife_hwr_irq_wm_done_cb(
 					ife_hwr_mgr_ctx->common.cb_priv,
 					CAM_ISP_HW_EVENT_DONE,
@@ -2802,8 +2895,9 @@
 			break;
 		}
 		if (!buf_done_status)
-			CDBG("buf_done status:(%d),out_res->res_id: 0x%x\n",
-			buf_done_status, isp_ife_out_res->res_id);
+			CAM_DBG(CAM_ISP,
+				"buf_done status:(%d),out_res->res_id: 0x%x",
+				buf_done_status, isp_ife_out_res->res_id);
 	}
 
 	return rc;
@@ -2824,8 +2918,8 @@
 	 * for the first phase, we are going to reset entire HW.
 	 */
 
-	CDBG("%s: Exit (buf_done_status (Error) = %d)!\n", __func__,
-			buf_done_status);
+	CAM_DBG(CAM_ISP, "Exit (buf_done_status (Error) = %d)!",
+		buf_done_status);
 	return rc;
 }
 
@@ -2842,16 +2936,19 @@
 	evt_payload = evt_payload_priv;
 	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
 
-	CDBG("addr of evt_payload = %llx\n", (uint64_t)evt_payload);
-	CDBG("bus_irq_status_0: = %x\n", evt_payload->irq_reg_val[0]);
-	CDBG("bus_irq_status_1: = %x\n", evt_payload->irq_reg_val[1]);
-	CDBG("bus_irq_status_2: = %x\n", evt_payload->irq_reg_val[2]);
-	CDBG("bus_irq_comp_err: = %x\n", evt_payload->irq_reg_val[3]);
-	CDBG("bus_irq_comp_owrt: = %x\n", evt_payload->irq_reg_val[4]);
-	CDBG("bus_irq_dual_comp_err: = %x\n", evt_payload->irq_reg_val[5]);
-	CDBG("bus_irq_dual_comp_owrt: = %x\n", evt_payload->irq_reg_val[6]);
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %llx", (uint64_t)evt_payload);
+	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x",
+		evt_payload->irq_reg_val[4]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x",
+		evt_payload->irq_reg_val[5]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
+		evt_payload->irq_reg_val[6]);
 
-	CDBG("%s: Calling Buf_done\n", __func__);
+	CAM_DBG(CAM_ISP, "Calling Buf_done");
 	/* WM Done */
 	return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
 		evt_payload_priv);
@@ -2869,10 +2966,11 @@
 	evt_payload = evt_payload_priv;
 	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
 
-	CDBG("addr of evt_payload = %llx\n", (uint64_t)evt_payload);
-	CDBG("irq_status_0: = %x\n", evt_payload->irq_reg_val[0]);
-	CDBG("irq_status_1: = %x\n", evt_payload->irq_reg_val[1]);
-	CDBG("Violation register: = %x\n", evt_payload->irq_reg_val[2]);
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %llx", (uint64_t)evt_payload);
+	CAM_DBG(CAM_ISP, "irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+	CAM_DBG(CAM_ISP, "irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+	CAM_DBG(CAM_ISP, "Violation register: = %x",
+		evt_payload->irq_reg_val[2]);
 
 	/*
 	 * If overflow/overwrite/error/violation are pending
@@ -2882,22 +2980,22 @@
 	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
 		evt_payload_priv);
 	if (rc) {
-		pr_err("%s: Encountered Error (%d), ignoring other irqs\n",
-			__func__, rc);
+		CAM_ERR(CAM_ISP, "Encountered Error (%d), ignoring other irqs",
+			 rc);
 		return IRQ_HANDLED;
 	}
 
-	CDBG("%s: Calling SOF\n", __func__);
+	CAM_DBG(CAM_ISP, "Calling SOF");
 	/* SOF IRQ */
-	cam_ife_hw_mgr_handle_sof_for_camif_hw_res(ife_hwr_mgr_ctx,
+	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
 		evt_payload_priv);
 
-	CDBG("%s: Calling RUP\n", __func__);
+	CAM_DBG(CAM_ISP, "Calling RUP");
 	/* REG UPDATE */
-	cam_ife_hw_mgr_handle_rup_for_camif_hw_res(ife_hwr_mgr_ctx,
+	cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
 		evt_payload_priv);
 
-	CDBG("%s: Calling EPOCH\n", __func__);
+	CAM_DBG(CAM_ISP, "Calling EPOCH");
 	/* EPOCH IRQ */
 	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
 		evt_payload_priv);
@@ -2943,15 +3041,14 @@
 	int i, j;
 	struct cam_iommu_handle cdm_handles;
 
-	CDBG("%s: Enter\n", __func__);
+	CAM_DBG(CAM_ISP, "Enter");
 
 	memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
 
 	mutex_init(&g_ife_hw_mgr.ctx_mutex);
 
 	if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
-		pr_err("%s: Fatal, CSID num is different then IFE num!\n",
-			__func__);
+		CAM_ERR(CAM_ISP, "Fatal, CSID num is different than IFE num!");
 		goto end;
 	}
 
@@ -2967,15 +3064,16 @@
 			j++;
 
 			g_ife_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0];
-			CDBG("reg_map: mem base = 0x%llx, cam_base = 0x%llx\n",
-				(uint64_t) soc_info->reg_map[0].mem_base,
+			CAM_DBG(CAM_ISP,
+				"reg_map: mem base = %pK cam_base = 0x%llx",
+				(void __iomem *)soc_info->reg_map[0].mem_base,
 				(uint64_t) soc_info->reg_map[0].mem_cam_base);
 		} else {
 			g_ife_hw_mgr.cdm_reg_map[i] = NULL;
 		}
 	}
 	if (j == 0) {
-		pr_err("%s: no valid IFE HW!\n", __func__);
+		CAM_ERR(CAM_ISP, "no valid IFE HW!");
 		goto end;
 	}
 
@@ -2986,7 +3084,7 @@
 			j++;
 	}
 	if (!j) {
-		pr_err("%s: no valid IFE CSID HW!\n", __func__);
+		CAM_ERR(CAM_ISP, "no valid IFE CSID HW!");
 		goto end;
 	}
 
@@ -3005,26 +3103,27 @@
 	 */
 	if (cam_smmu_get_handle("ife",
 		&g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
-		pr_err("%s: Can not get iommu handle.\n", __func__);
+		CAM_ERR(CAM_ISP, "Can not get iommu handle.");
 		goto end;
 	}
 
 	if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
 		CAM_SMMU_ATTACH)) {
-		pr_err("%s: Attach iommu handle failed.\n", __func__);
+		CAM_ERR(CAM_ISP, "Attach iommu handle failed.");
 		goto end;
 	}
 
-	CDBG("got iommu_handle=%d\n", g_ife_hw_mgr.mgr_common.img_iommu_hdl);
+	CAM_DBG(CAM_ISP, "got iommu_handle=%d",
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl);
 	g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
 
 	if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
-		CDBG("Successfully acquired the CDM iommu handles\n");
+		CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
 		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure;
 		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure =
 			cdm_handles.secure;
 	} else {
-		CDBG("Failed to acquire the CDM iommu handles\n");
+		CAM_DBG(CAM_ISP, "Failed to acquire the CDM iommu handles");
 		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = -1;
 		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
 	}
@@ -3059,7 +3158,7 @@
 				 sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
 		if (!g_ife_hw_mgr.ctx_pool[i].cdm_cmd) {
 			rc = -ENOMEM;
-			pr_err("Allocation Failed for cdm command\n");
+			CAM_ERR(CAM_ISP, "Allocation Failed for cdm command");
 			goto end;
 		}
 
@@ -3080,7 +3179,7 @@
 			&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
 
 	if (rc < 0) {
-		pr_err("%s: Unable to create worker\n", __func__);
+		CAM_ERR(CAM_ISP, "Unable to create worker");
 		goto end;
 	}
 
@@ -3095,8 +3194,9 @@
 	hw_mgr_intf->hw_release = cam_ife_mgr_release_hw;
 	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
+	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
 
-	CDBG("%s: Exit\n", __func__);
+	CAM_DBG(CAM_ISP, "Exit");
 	return 0;
 end:
 	if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 174d2ce..6dfdb21 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -108,6 +108,7 @@
  * @epoch_cnt               epoch count value per core, used for dual VFE
  * @overflow_pending        flat to specify the overflow is pending for the
  *                          context
+ * @is_rdi_only_context     flag to indicate the context has only rdi resources
  */
 struct cam_ife_hw_mgr_ctx {
 	struct list_head                list;
@@ -138,6 +139,7 @@
 	uint32_t                        sof_cnt[CAM_IFE_HW_NUM_MAX];
 	uint32_t                        epoch_cnt[CAM_IFE_HW_NUM_MAX];
 	atomic_t                        overflow_pending;
+	uint32_t                        is_rdi_only_context;
 
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
index 2e23222..2f18895 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -12,6 +12,7 @@
 
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_ife_hw_mgr.h"
+#include "cam_debug_util.h"
 
 
 int cam_isp_hw_mgr_init(struct device_node *of_node,
@@ -26,7 +27,7 @@
 	if (strnstr(compat_str, "ife", strlen(compat_str)))
 		rc = cam_ife_hw_mgr_init(hw_mgr);
 	else {
-		pr_err("%s: Invalid ISP hw type\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid ISP hw type");
 		rc = -EINVAL;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index 3c72279..0a0eecb 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -15,130 +15,13 @@
 #include "cam_mem_mgr.h"
 #include "cam_vfe_hw_intf.h"
 #include "cam_isp_packet_parser.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
-static int cam_isp_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
-	size_t *len)
-{
-	int rc = 0;
-	uint64_t kmd_buf_addr = 0;
-
-	rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
-	if (rc) {
-		pr_err("%s:%d Unable to get the virtual address rc:%d\n",
-			__func__, __LINE__, rc);
-		rc = -ENOMEM;
-	} else {
-		if (kmd_buf_addr && *len)
-			*buf_addr = (uint32_t *)kmd_buf_addr;
-		else {
-			pr_err("%s:%d Invalid addr and length :%ld\n",
-				__func__, __LINE__, *len);
-			rc = -ENOMEM;
-		}
-	}
-	return rc;
-}
-
-static int cam_isp_validate_cmd_desc(
-	struct cam_cmd_buf_desc *cmd_desc)
-{
-	if (cmd_desc->length > cmd_desc->size ||
-		(cmd_desc->mem_handle <= 0)) {
-		pr_err("%s:%d invalid cmd arg %d %d %d %d\n",
-			__func__, __LINE__, cmd_desc->offset,
-			cmd_desc->length, cmd_desc->mem_handle,
-			cmd_desc->size);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int cam_isp_validate_packet(struct cam_packet *packet)
-{
-	if (!packet)
-		return -EINVAL;
-
-	CDBG("%s:%d num cmd buf:%d num of io config:%d kmd buf index:%d\n",
-		__func__, __LINE__, packet->num_cmd_buf,
-		packet->num_io_configs, packet->kmd_cmd_buf_index);
-
-	if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf ||
-		(!packet->header.size) ||
-		packet->cmd_buf_offset > packet->header.size ||
-		packet->io_configs_offset > packet->header.size)  {
-		pr_err("%s:%d invalid packet:%d %d %d %d %d\n",
-			__func__, __LINE__, packet->kmd_cmd_buf_index,
-			packet->num_cmd_buf, packet->cmd_buf_offset,
-			packet->io_configs_offset, packet->header.size);
-		return -EINVAL;
-	}
-
-	CDBG("%s:%d exit\n", __func__, __LINE__);
-	return 0;
-}
-
-int cam_isp_get_kmd_buffer(struct cam_packet *packet,
-	struct cam_isp_kmd_buf_info *kmd_buf)
-{
-	int                      rc = 0;
-	size_t                   len = 0;
-	struct cam_cmd_buf_desc *cmd_desc;
-	uint32_t                *cpu_addr;
-
-	if (!packet || !kmd_buf) {
-		pr_err("%s:%d Invalid arg\n", __func__, __LINE__);
-		rc = -EINVAL;
-		return rc;
-	}
-
-	/* Take first command descriptor and add offset to it for kmd*/
-	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
-			&packet->payload + packet->cmd_buf_offset);
-	cmd_desc += packet->kmd_cmd_buf_index;
-
-	CDBG("%s:%d enter\n", __func__, __LINE__);
-	rc = cam_isp_validate_cmd_desc(cmd_desc);
-	if (rc)
-		return rc;
-
-	CDBG("%s:%d enter\n", __func__, __LINE__);
-	rc = cam_isp_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
-		&len);
-	if (rc)
-		return rc;
-
-	if (len < cmd_desc->size) {
-		pr_err("%s:%d invalid memory len:%ld and cmd desc size:%d\n",
-			__func__, __LINE__, len, cmd_desc->size);
-		return -EINVAL;
-	}
-
-	cpu_addr += cmd_desc->offset/4 + packet->kmd_cmd_buf_offset/4;
-	CDBG("%s:%d total size %d, cmd size: %d, KMD buffer size: %d\n",
-		__func__, __LINE__, cmd_desc->size, cmd_desc->length,
-		cmd_desc->size - cmd_desc->length);
-	CDBG("%s:%d: handle 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK\n",
-		__func__, __LINE__, cmd_desc->mem_handle, cmd_desc->offset,
-		packet->kmd_cmd_buf_offset, cpu_addr);
-
-	kmd_buf->cpu_addr   = cpu_addr;
-	kmd_buf->handle     = cmd_desc->mem_handle;
-	kmd_buf->offset     = cmd_desc->offset + packet->kmd_cmd_buf_offset;
-	kmd_buf->size       = cmd_desc->size - cmd_desc->length;
-	kmd_buf->used_bytes = 0;
-
-	return rc;
-}
+#include "cam_debug_util.h"
 
 int cam_isp_add_change_base(
 	struct cam_hw_prepare_update_args      *prepare,
 	struct list_head                       *res_list_isp_src,
 	uint32_t                                base_idx,
-	struct cam_isp_kmd_buf_info            *kmd_buf_info)
+	struct cam_kmd_buf_info                *kmd_buf_info)
 {
 	int rc = -EINVAL;
 	struct cam_ife_hw_mgr_res       *hw_mgr_res;
@@ -152,9 +35,8 @@
 
 	/* Max one hw entries required for each base */
 	if (num_ent + 1 >= prepare->max_hw_update_entries) {
-		pr_err("%s:%d Insufficient  HW entries :%d %d\n",
-			__func__, __LINE__, num_ent,
-			prepare->max_hw_update_entries);
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: %d %d",
+			num_ent, prepare->max_hw_update_entries);
 		return -EINVAL;
 	}
 
@@ -220,8 +102,8 @@
 			((uint8_t *)&prepare->packet->payload +
 			prepare->packet->cmd_buf_offset);
 
-	CDBG("%s:%d split id = %d, number of command buffers:%d\n", __func__,
-		__LINE__, split_id, prepare->packet->num_cmd_buf);
+	CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d",
+		split_id, prepare->packet->num_cmd_buf);
 
 	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
 		if (!cmd_desc[i].length)
@@ -229,19 +111,18 @@
 
 		/* One hw entry space required for left or right or common */
 		if (num_ent + 1 >= prepare->max_hw_update_entries) {
-			pr_err("%s:%d Insufficient  HW entries :%d %d\n",
-				__func__, __LINE__, num_ent,
-				prepare->max_hw_update_entries);
+			CAM_ERR(CAM_ISP, "Insufficient HW entries: %d %d",
+				num_ent, prepare->max_hw_update_entries);
 			return -EINVAL;
 		}
 
-		rc = cam_isp_validate_cmd_desc(&cmd_desc[i]);
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
 		if (rc)
 			return rc;
 
 		cmd_meta_data = cmd_desc[i].meta_data;
 
-		CDBG("%s:%d meta type: %d, split_id: %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d",
 			cmd_meta_data, split_id);
 
 		switch (cmd_meta_data) {
@@ -288,8 +169,8 @@
 			num_ent++;
 			break;
 		default:
-			pr_err("%s:%d invalid cdm command meta data %d\n",
-			__func__, __LINE__, cmd_meta_data);
+			CAM_ERR(CAM_ISP, "invalid cdm command meta data %d",
+				cmd_meta_data);
 			return -EINVAL;
 		}
 	}
@@ -304,7 +185,7 @@
 	int                                   iommu_hdl,
 	struct cam_hw_prepare_update_args    *prepare,
 	uint32_t                              base_idx,
-	struct cam_isp_kmd_buf_info          *kmd_buf_info,
+	struct cam_kmd_buf_info              *kmd_buf_info,
 	struct cam_ife_hw_mgr_res            *res_list_isp_out,
 	uint32_t                              size_isp_out,
 	bool                                  fill_fence)
@@ -331,33 +212,31 @@
 	/* Max one hw entries required for each base */
 	if (prepare->num_hw_update_entries + 1 >=
 			prepare->max_hw_update_entries) {
-		pr_err("%s:%d Insufficient  HW entries :%d %d\n",
-			__func__, __LINE__, prepare->num_hw_update_entries,
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: %d %d",
+			prepare->num_hw_update_entries,
 			prepare->max_hw_update_entries);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < prepare->packet->num_io_configs; i++) {
-		CDBG("%s:%d ======= io config idx %d ============\n",
-			__func__, __LINE__, i);
-		CDBG("%s:%d resource_type:%d fence:%d\n", __func__, __LINE__,
+		CAM_DBG(CAM_ISP, "======= io config idx %d ============", i);
+		CAM_DBG(CAM_ISP, "resource_type:%d fence:%d",
 			io_cfg[i].resource_type, io_cfg[i].fence);
-		CDBG("%s:%d format: %d\n", __func__, __LINE__,
-			io_cfg[i].format);
-		CDBG("%s:%d direction %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_ISP, "format: %d", io_cfg[i].format);
+		CAM_DBG(CAM_ISP, "direction %d",
 			io_cfg[i].direction);
 
 		if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
 			res_id_out = io_cfg[i].resource_type & 0xFF;
 			if (res_id_out >= size_isp_out) {
-				pr_err("%s:%d invalid out restype:%x\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_ISP, "invalid out restype:%x",
 					io_cfg[i].resource_type);
 				return -EINVAL;
 			}
 
-			CDBG("%s:%d configure output io with fill fence %d\n",
-				__func__, __LINE__, fill_fence);
+			CAM_DBG(CAM_ISP,
+				"configure output io with fill fence %d",
+				fill_fence);
 			if (fill_fence) {
 				if (num_out_buf <
 					prepare->max_out_map_entries) {
@@ -368,8 +247,7 @@
 						sync_id = io_cfg[i].fence;
 					num_out_buf++;
 				} else {
-					pr_err("%s:%d ln_out:%d max_ln:%d\n",
-						__func__, __LINE__,
+					CAM_ERR(CAM_ISP, "ln_out:%d max_ln:%d",
 						num_out_buf,
 						prepare->max_out_map_entries);
 					return -EINVAL;
@@ -378,15 +256,15 @@
 
 			hw_mgr_res = &res_list_isp_out[res_id_out];
 			if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
-				pr_err("%s:%d io res id:%d not valid\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_ISP, "io res id:%d not valid",
 					io_cfg[i].resource_type);
 				return -EINVAL;
 			}
 		} else if (io_cfg[i].direction == CAM_BUF_INPUT) {
 			res_id_in = io_cfg[i].resource_type & 0xFF;
-			CDBG("%s:%d configure input io with fill fence %d\n",
-				__func__, __LINE__, fill_fence);
+			CAM_DBG(CAM_ISP,
+				"configure input io with fill fence %d",
+				fill_fence);
 			if (fill_fence) {
 				if (num_in_buf < prepare->max_in_map_entries) {
 					prepare->in_map_entries[num_in_buf].
@@ -397,8 +275,7 @@
 							io_cfg[i].fence;
 					num_in_buf++;
 				} else {
-					pr_err("%s:%d ln_in:%d imax_ln:%d\n",
-						__func__, __LINE__,
+					CAM_ERR(CAM_ISP, "ln_in:%d imax_ln:%d",
 						num_in_buf,
 						prepare->max_in_map_entries);
 					return -EINVAL;
@@ -406,13 +283,12 @@
 			}
 			continue;
 		} else {
-			pr_err("%s:%d Invalid io config direction :%d\n",
-				__func__, __LINE__,
+			CAM_ERR(CAM_ISP, "Invalid io config direction :%d",
 				io_cfg[i].direction);
 			return -EINVAL;
 		}
 
-		CDBG("%s:%d setup mem io\n", __func__, __LINE__);
+		CAM_DBG(CAM_ISP, "setup mem io");
 		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
 			if (!hw_mgr_res->hw_res[j])
 				continue;
@@ -422,9 +298,9 @@
 
 			res = hw_mgr_res->hw_res[j];
 			if (res->res_id != io_cfg[i].resource_type) {
-				pr_err("%s:%d wm err res id:%d io res id:%d\n",
-					__func__, __LINE__, res->res_id,
-					io_cfg[i].resource_type);
+				CAM_ERR(CAM_ISP,
+					"wm err res id:%d io res id:%d",
+					res->res_id, io_cfg[i].resource_type);
 				return -EINVAL;
 			}
 
@@ -439,14 +315,16 @@
 					io_cfg[i].mem_handle[plane_id],
 					iommu_hdl, &io_addr[plane_id], &size);
 				if (rc) {
-					pr_err("%s:%d no io addr for plane%d\n",
-						__func__, __LINE__, plane_id);
+					CAM_ERR(CAM_ISP,
+						"no io addr for plane%d",
+						plane_id);
 					rc = -ENOMEM;
 					return rc;
 				}
 
 				if (io_addr[plane_id] >> 32) {
-					pr_err("Invalid mapped address\n");
+					CAM_ERR(CAM_ISP,
+						"Invalid mapped address");
 					rc = -EINVAL;
 					return rc;
 				}
@@ -454,13 +332,13 @@
 				/* need to update with offset */
 				io_addr[plane_id] +=
 						io_cfg[i].offsets[plane_id];
-				CDBG("%s: get io_addr for plane %d: 0x%llx\n",
-					__func__, plane_id,
-					io_addr[plane_id]);
+				CAM_DBG(CAM_ISP,
+					"get io_addr for plane %d: 0x%llx",
+					plane_id, io_addr[plane_id]);
 			}
 			if (!plane_id) {
-				pr_err("%s:%d No valid planes for res%d\n",
-					__func__, __LINE__, res->res_id);
+				CAM_ERR(CAM_ISP, "No valid planes for res%d",
+					res->res_id);
 				rc = -ENOMEM;
 				return rc;
 			}
@@ -471,8 +349,9 @@
 					(kmd_buf_info->used_bytes +
 					io_cfg_used_bytes);
 			} else {
-				pr_err("%s:%d no free kmd memory for base %d\n",
-					__func__, __LINE__, base_idx);
+				CAM_ERR(CAM_ISP,
+					"no free kmd memory for base %d",
+					base_idx);
 				rc = -ENOMEM;
 				return rc;
 			}
@@ -485,8 +364,8 @@
 			update_buf.num_buf   = plane_id;
 			update_buf.io_cfg    = &io_cfg[i];
 
-			CDBG("%s:%d: cmd buffer 0x%pK, size %d\n", __func__,
-				__LINE__, update_buf.cdm.cmd_buf_addr,
+			CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
+				update_buf.cdm.cmd_buf_addr,
 				update_buf.cdm.size);
 			rc = res->hw_intf->hw_ops.process_cmd(
 				res->hw_intf->hw_priv,
@@ -494,8 +373,8 @@
 				sizeof(struct cam_isp_hw_get_buf_update));
 
 			if (rc) {
-				pr_err("%s:%d get buf cmd error:%d\n",
-					__func__, __LINE__, res->res_id);
+				CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+					res->res_id);
 				rc = -ENOMEM;
 				return rc;
 			}
@@ -503,7 +382,7 @@
 		}
 	}
 
-	CDBG("%s: io_cfg_used_bytes %d, fill_fence %d\n", __func__,
+	CAM_DBG(CAM_ISP, "io_cfg_used_bytes %d, fill_fence %d",
 		io_cfg_used_bytes, fill_fence);
 	if (io_cfg_used_bytes) {
 		/* Update the HW entries */
@@ -533,7 +412,7 @@
 	struct cam_hw_prepare_update_args    *prepare,
 	struct list_head                     *res_list_isp_src,
 	uint32_t                              base_idx,
-	struct cam_isp_kmd_buf_info          *kmd_buf_info)
+	struct cam_kmd_buf_info              *kmd_buf_info)
 {
 	int rc = -EINVAL;
 	struct cam_isp_resource_node         *res;
@@ -546,8 +425,7 @@
 	/* Max one hw entries required for each base */
 	if (prepare->num_hw_update_entries + 1 >=
 				prepare->max_hw_update_entries) {
-		pr_err("%s:%d Insufficient  HW entries :%d %d\n",
-			__func__, __LINE__,
+		CAM_ERR(CAM_ISP, "Insufficient  HW entries :%d %d",
 			prepare->num_hw_update_entries,
 			prepare->max_hw_update_entries);
 		return -EINVAL;
@@ -572,9 +450,8 @@
 					(kmd_buf_info->used_bytes +
 					reg_update_size);
 			} else {
-				pr_err("%s:%d no free mem %d %d %d\n",
-					__func__, __LINE__, base_idx,
-					kmd_buf_info->size,
+				CAM_ERR(CAM_ISP, "no free mem %d %d %d",
+					base_idx, kmd_buf_info->size,
 					kmd_buf_info->used_bytes +
 					reg_update_size);
 				rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
index ecc71b3..4a7eff8 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
@@ -19,9 +17,7 @@
 #include <linux/ratelimit.h>
 #include "cam_tasklet_util.h"
 #include "cam_irq_controller.h"
-
-#undef  CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 #define CAM_TASKLETQ_SIZE              256
 
@@ -95,14 +91,14 @@
 	*tasklet_cmd = NULL;
 
 	if (!atomic_read(&tasklet->tasklet_active)) {
-		pr_err_ratelimited("Tasklet is not active!\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet is not active!\n");
 		rc = -EPIPE;
 		return rc;
 	}
 
 	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
 	if (list_empty(&tasklet->free_cmd_list)) {
-		pr_err_ratelimited("No more free tasklet cmd!\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No more free tasklet cmd!\n");
 		rc = -ENODEV;
 		goto spin_unlock;
 	} else {
@@ -162,22 +158,22 @@
 	*tasklet_cmd = NULL;
 
 	if (!atomic_read(&tasklet->tasklet_active)) {
-		pr_err("Tasklet is not active!\n");
+		CAM_ERR(CAM_ISP, "Tasklet is not active!");
 		rc = -EPIPE;
 		return rc;
 	}
 
-	CDBG("Dequeue before lock.\n");
+	CAM_DBG(CAM_ISP, "Dequeue before lock.");
 	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
 	if (list_empty(&tasklet->used_cmd_list)) {
-		CDBG("End of list reached. Exit\n");
+		CAM_DBG(CAM_ISP, "End of list reached. Exit");
 		rc = -ENODEV;
 		goto spin_unlock;
 	} else {
 		*tasklet_cmd = list_first_entry(&tasklet->used_cmd_list,
 			struct cam_tasklet_queue_cmd, list);
 		list_del_init(&(*tasklet_cmd)->list);
-		CDBG("Dequeue Successful\n");
+		CAM_DBG(CAM_ISP, "Dequeue Successful");
 	}
 
 spin_unlock:
@@ -197,14 +193,14 @@
 	int                            rc;
 
 	if (!bottom_half) {
-		pr_err("NULL bottom half\n");
+		CAM_ERR(CAM_ISP, "NULL bottom half");
 		return -EINVAL;
 	}
 
 	rc = cam_tasklet_get_cmd(tasklet, &tasklet_cmd);
 
 	if (tasklet_cmd) {
-		CDBG("%s: Enqueue tasklet cmd\n", __func__);
+		CAM_DBG(CAM_ISP, "Enqueue tasklet cmd");
 		tasklet_cmd->bottom_half_handler = bottom_half_handler;
 		tasklet_cmd->payload = evt_payload_priv;
 		spin_lock_irqsave(&tasklet->tasklet_lock, flags);
@@ -213,7 +209,7 @@
 		spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
 		tasklet_schedule(&tasklet->tasklet);
 	} else {
-		pr_err("%s: tasklet cmd is NULL!\n", __func__);
+		CAM_ERR(CAM_ISP, "tasklet cmd is NULL!");
 	}
 
 	return rc;
@@ -229,7 +225,8 @@
 
 	tasklet = kzalloc(sizeof(struct cam_tasklet_info), GFP_KERNEL);
 	if (!tasklet) {
-		CDBG("Error! Unable to allocate memory for tasklet");
+		CAM_DBG(CAM_ISP,
+			"Error! Unable to allocate memory for tasklet");
 		*tasklet_info = NULL;
 		return -ENOMEM;
 	}
@@ -271,7 +268,8 @@
 	struct cam_tasklet_queue_cmd  *tasklet_cmd_temp;
 
 	if (atomic_read(&tasklet->tasklet_active)) {
-		pr_err("Tasklet already active. idx = %d\n", tasklet->index);
+		CAM_ERR(CAM_ISP, "Tasklet already active. idx = %d",
+			tasklet->index);
 		return -EBUSY;
 	}
 	atomic_set(&tasklet->tasklet_active, 1);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
index 9730fc2..7ac729f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -18,47 +18,7 @@
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_ife_hw_mgr.h"
 #include "cam_hw_intf.h"
-
-/**
- * @brief                  KMD scratch buffer information
- *
- * @handle:                Memory handle
- * @cpu_addr:              Cpu address
- * @offset:                Offset from the start of the buffer
- * @size:                  Size of the buffer
- * @used_bytes:            Used memory in bytes
- *
- */
-struct cam_isp_kmd_buf_info {
-	int        handle;
-	uint32_t  *cpu_addr;
-	uint32_t   offset;
-	uint32_t   size;
-	uint32_t   used_bytes;
-};
-
-
-/**
- * @brief                  Validate the packet
- *
- * @packet:                Packet to be validated
- *
- * @return:                0 for success
- *                         -EINVAL for Fail
- */
-int cam_isp_validate_packet(struct cam_packet *packet);
-
-/**
- * @brief                  Get the kmd buffer from the packet command descriptor
- *
- * @packet:                Packet data
- * @kmd_buf:               Extracted the KMD buffer information
- *
- * @return:                0 for success
- *                         -EINVAL for Fail
- */
-int cam_isp_get_kmd_buffer(struct cam_packet *packet,
-	struct cam_isp_kmd_buf_info *kmd_buf_info);
+#include "cam_packet_util.h"
 
 /**
  * @brief                  Add change base in the hw entries list
@@ -77,7 +37,7 @@
 	struct cam_hw_prepare_update_args     *prepare,
 	struct list_head                      *res_list_isp_src,
 	uint32_t                               base_idx,
-	struct cam_isp_kmd_buf_info           *kmd_buf_info);
+	struct cam_kmd_buf_info               *kmd_buf_info);
 
 /**
  * @brief                  Add command buffer in the HW entries list for given
@@ -112,7 +72,7 @@
 int cam_isp_add_io_buffers(int	 iommu_hdl,
 	struct cam_hw_prepare_update_args    *prepare,
 	uint32_t                              base_idx,
-	struct cam_isp_kmd_buf_info          *kmd_buf_info,
+	struct cam_kmd_buf_info              *kmd_buf_info,
 	struct cam_ife_hw_mgr_res            *res_list_isp_out,
 	uint32_t                              size_isp_out,
 	bool                                  fill_fence);
@@ -134,7 +94,7 @@
 	struct cam_hw_prepare_update_args    *prepare,
 	struct list_head                     *res_list_isp_src,
 	uint32_t                              base_idx,
-	struct cam_isp_kmd_buf_info          *kmd_buf_info);
+	struct cam_kmd_buf_info              *kmd_buf_info);
 
 
 #endif /*_CAM_ISP_HW_PARSER_H */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index 9a42b6e..2341b38 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -10,16 +10,12 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include "cam_io_util.h"
 #include "cam_irq_controller.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 /**
  * struct cam_irq_evt_handler:
@@ -143,21 +139,21 @@
 
 	if (!register_info->num_registers || !register_info->irq_reg_set ||
 		!name || !mem_base) {
-		pr_err("Invalid parameters\n");
+		CAM_ERR(CAM_ISP, "Invalid parameters");
 		rc = -EINVAL;
 		return rc;
 	}
 
 	controller = kzalloc(sizeof(struct cam_irq_controller), GFP_KERNEL);
 	if (!controller) {
-		CDBG("Failed to allocate IRQ Controller\n");
+		CAM_DBG(CAM_ISP, "Failed to allocate IRQ Controller");
 		return -ENOMEM;
 	}
 
 	controller->irq_register_arr = kzalloc(register_info->num_registers *
 		sizeof(struct cam_irq_register_obj), GFP_KERNEL);
 	if (!controller->irq_register_arr) {
-		CDBG("Failed to allocate IRQ register Arr\n");
+		CAM_DBG(CAM_ISP, "Failed to allocate IRQ register Arr");
 		rc = -ENOMEM;
 		goto reg_alloc_error;
 	}
@@ -165,7 +161,7 @@
 	controller->irq_status_arr = kzalloc(register_info->num_registers *
 		sizeof(uint32_t), GFP_KERNEL);
 	if (!controller->irq_status_arr) {
-		CDBG("Failed to allocate IRQ status Arr\n");
+		CAM_DBG(CAM_ISP, "Failed to allocate IRQ status Arr");
 		rc = -ENOMEM;
 		goto status_alloc_error;
 	}
@@ -174,14 +170,14 @@
 		kzalloc(register_info->num_registers * sizeof(uint32_t),
 		GFP_KERNEL);
 	if (!controller->th_payload.evt_status_arr) {
-		CDBG("Failed to allocate BH payload bit mask Arr\n");
+		CAM_DBG(CAM_ISP, "Failed to allocate BH payload bit mask Arr");
 		rc = -ENOMEM;
 		goto evt_mask_alloc_error;
 	}
 
 	controller->name = name;
 
-	CDBG("num_registers: %d\n", register_info->num_registers);
+	CAM_DBG(CAM_ISP, "num_registers: %d", register_info->num_registers);
 	for (i = 0; i < register_info->num_registers; i++) {
 		controller->irq_register_arr[i].index = i;
 		controller->irq_register_arr[i].mask_reg_offset =
@@ -190,11 +186,11 @@
 			register_info->irq_reg_set[i].clear_reg_offset;
 		controller->irq_register_arr[i].status_reg_offset =
 			register_info->irq_reg_set[i].status_reg_offset;
-		CDBG("i %d mask_reg_offset: 0x%x\n", i,
+		CAM_DBG(CAM_ISP, "i %d mask_reg_offset: 0x%x", i,
 			controller->irq_register_arr[i].mask_reg_offset);
-		CDBG("i %d clear_reg_offset: 0x%x\n", i,
+		CAM_DBG(CAM_ISP, "i %d clear_reg_offset: 0x%x", i,
 			controller->irq_register_arr[i].clear_reg_offset);
-		CDBG("i %d status_reg_offset: 0x%x\n", i,
+		CAM_DBG(CAM_ISP, "i %d status_reg_offset: 0x%x", i,
 			controller->irq_register_arr[i].status_reg_offset);
 	}
 	controller->num_registers        = register_info->num_registers;
@@ -202,11 +198,11 @@
 	controller->global_clear_offset  = register_info->global_clear_offset;
 	controller->mem_base             = mem_base;
 
-	CDBG("global_clear_bitmask: 0x%x\n",
+	CAM_DBG(CAM_ISP, "global_clear_bitmask: 0x%x",
 		controller->global_clear_bitmask);
-	CDBG("global_clear_offset: 0x%x\n",
+	CAM_DBG(CAM_ISP, "global_clear_offset: 0x%x",
 		controller->global_clear_offset);
-	CDBG("mem_base: 0x%llx\n", (uint64_t)controller->mem_base);
+	CAM_DBG(CAM_ISP, "mem_base: %pK", (void __iomem *)controller->mem_base);
 
 	INIT_LIST_HEAD(&controller->evt_handler_list_head);
 	for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++)
@@ -246,19 +242,21 @@
 	unsigned long               flags;
 
 	if (!controller || !handler_priv || !evt_bit_mask_arr) {
-		pr_err("Invalid params: ctlr=%pK handler_priv=%pK bit_mask_arr = %pK\n",
+		CAM_ERR(CAM_ISP,
+			"Inval params: ctlr=%pK hdl_priv=%pK bit_mask_arr=%pK",
 			controller, handler_priv, evt_bit_mask_arr);
 		return -EINVAL;
 	}
 
 	if (!top_half_handler) {
-		pr_err("Missing top half handler\n");
+		CAM_ERR(CAM_ISP, "Missing top half handler");
 		return -EINVAL;
 	}
 
 	if (bottom_half_handler &&
 		(!bottom_half || !bottom_half_enqueue_func)) {
-		pr_err("Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK\n",
+		CAM_ERR(CAM_ISP,
+			"Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK",
 			bottom_half_handler,
 			bottom_half,
 			bottom_half_enqueue_func);
@@ -266,21 +264,21 @@
 	}
 
 	if (priority >= CAM_IRQ_PRIORITY_MAX) {
-		pr_err("Invalid priority=%u, max=%u\n", priority,
+		CAM_ERR(CAM_ISP, "Invalid priority=%u, max=%u", priority,
 			CAM_IRQ_PRIORITY_MAX);
 		return -EINVAL;
 	}
 
 	evt_handler = kzalloc(sizeof(struct cam_irq_evt_handler), GFP_KERNEL);
 	if (!evt_handler) {
-		CDBG("Error allocating hlist_node\n");
+		CAM_DBG(CAM_ISP, "Error allocating hlist_node");
 		return -ENOMEM;
 	}
 
 	evt_handler->evt_bit_mask_arr = kzalloc(sizeof(uint32_t) *
 		controller->num_registers, GFP_KERNEL);
 	if (!evt_handler->evt_bit_mask_arr) {
-		CDBG("Error allocating hlist_node\n");
+		CAM_DBG(CAM_ISP, "Error allocating hlist_node");
 		rc = -ENOMEM;
 		goto free_evt_handler;
 	}
@@ -346,7 +344,7 @@
 	list_for_each_entry_safe(evt_handler, evt_handler_temp,
 		&controller->evt_handler_list_head, list_node) {
 		if (evt_handler->index == handle) {
-			CDBG("unsubscribe item %d\n", handle);
+			CAM_DBG(CAM_ISP, "unsubscribe item %d", handle);
 			list_del_init(&evt_handler->list_node);
 			list_del_init(&evt_handler->th_list_node);
 			found = 1;
@@ -429,7 +427,7 @@
 	int                             rc = -EINVAL;
 	int                             i;
 
-	CDBG("Enter\n");
+	CAM_DBG(CAM_ISP, "Enter");
 
 	if (list_empty(th_list_head))
 		return;
@@ -441,7 +439,7 @@
 		if (!is_irq_match)
 			continue;
 
-		CDBG("match found\n");
+		CAM_DBG(CAM_ISP, "match found");
 
 		cam_irq_th_payload_init(th_payload);
 		th_payload->handler_priv  = evt_handler->handler_priv;
@@ -462,7 +460,7 @@
 				(void *)th_payload);
 
 		if (!rc && evt_handler->bottom_half_handler) {
-			CDBG("Enqueuing bottom half for %s\n",
+			CAM_DBG(CAM_ISP, "Enqueuing bottom half for %s",
 				controller->name);
 			if (evt_handler->bottom_half_enqueue_func) {
 				evt_handler->bottom_half_enqueue_func(
@@ -474,7 +472,7 @@
 		}
 	}
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 }
 
 irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
@@ -487,7 +485,7 @@
 	if (!controller)
 		return IRQ_NONE;
 
-	CDBG("locking controller %pK name %s rw_lock %pK\n",
+	CAM_DBG(CAM_ISP, "locking controller %pK name %s rw_lock %pK",
 		controller, controller->name, &controller->rw_lock);
 	read_lock(&controller->rw_lock);
 	for (i = 0; i < controller->num_registers; i++) {
@@ -497,7 +495,7 @@
 		cam_io_w_mb(controller->irq_status_arr[i],
 			controller->mem_base +
 			controller->irq_register_arr[i].clear_reg_offset);
-		CDBG("Read irq status%d (0x%x) = 0x%x\n", i,
+		CAM_DBG(CAM_ISP, "Read irq status%d (0x%x) = 0x%x", i,
 			controller->irq_register_arr[i].status_reg_offset,
 			controller->irq_status_arr[i]);
 		for (j = 0; j < CAM_IRQ_PRIORITY_MAX; j++) {
@@ -505,25 +503,26 @@
 				top_half_enable_mask[j] &
 				controller->irq_status_arr[i])
 				need_th_processing[j] = true;
-				CDBG("i %d j %d need_th_processing = %d\n",
+				CAM_DBG(CAM_ISP,
+					"i %d j %d need_th_processing = %d",
 					i, j, need_th_processing[j]);
 		}
 	}
 	read_unlock(&controller->rw_lock);
-	CDBG("unlocked controller %pK name %s rw_lock %pK\n",
+	CAM_DBG(CAM_ISP, "unlocked controller %pK name %s rw_lock %pK",
 		controller, controller->name, &controller->rw_lock);
 
-	CDBG("Status Registers read Successful\n");
+	CAM_DBG(CAM_ISP, "Status Registers read Successful");
 
 	if (controller->global_clear_offset)
 		cam_io_w_mb(controller->global_clear_bitmask,
 			controller->mem_base + controller->global_clear_offset);
 
-	CDBG("Status Clear done\n");
+	CAM_DBG(CAM_ISP, "Status Clear done");
 
 	for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++) {
 		if (need_th_processing[i]) {
-			CDBG("%s: Invoke TH processing\n", __func__);
+			CAM_DBG(CAM_ISP, "Invoke TH processing");
 			cam_irq_controller_th_processing(controller,
 				&controller->th_list_head[i]);
 		}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index b32bdb2..0480cd3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -115,6 +115,28 @@
 	uint64_t             timestamp;
 };
 
+/* enum cam_isp_hw_mgr_command - Hardware manager command type */
+enum cam_isp_hw_mgr_command {
+	CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,
+	CAM_ISP_HW_MGR_CMD_MAX,
+};
+
+/**
+ * struct cam_isp_hw_cmd_args - Payload for hw manager command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @cmd_type               HW command type
+ * @get_context            Get context type information
+ */
+struct cam_isp_hw_cmd_args {
+	void                               *ctxt_to_hw_map;
+	uint32_t                            cmd_type;
+	union {
+		uint32_t                      is_rdi_only_context;
+	} u;
+};
+
+
 /**
  * cam_isp_hw_mgr_init()
  *
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
index 1615d21f..4c6745c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
@@ -1,5 +1,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index e779aef..a2f773e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -19,10 +19,7 @@
 #include "cam_isp_hw.h"
 #include "cam_soc_util.h"
 #include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
+#include "cam_debug_util.h"
 
 /* Timeout value in msec */
 #define IFE_CSID_TIMEOUT                               1000
@@ -62,79 +59,127 @@
 	return rc;
 }
 
-static int cam_ife_csid_get_format(uint32_t  res_id,
-	uint32_t decode_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+static int cam_ife_csid_get_format(uint32_t input_fmt,
+	uint32_t *path_fmt)
 {
 	int rc = 0;
 
-	if (res_id >= CAM_IFE_PIX_PATH_RES_RDI_0 &&
-		res_id <= CAM_IFE_PIX_PATH_RES_RDI_3) {
-		*path_fmt = 0xf;
-		return 0;
-	}
-
-	switch (decode_fmt) {
+	switch (input_fmt) {
 	case CAM_FORMAT_MIPI_RAW_6:
 		*path_fmt  = 0;
-		*plain_fmt = 0;
 		break;
 	case CAM_FORMAT_MIPI_RAW_8:
 		*path_fmt  = 1;
-		*plain_fmt = 0;
 		break;
 	case CAM_FORMAT_MIPI_RAW_10:
 		*path_fmt  = 2;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_MIPI_RAW_12:
 		*path_fmt  = 3;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_MIPI_RAW_14:
 		*path_fmt  = 4;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_MIPI_RAW_16:
 		*path_fmt  = 5;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_MIPI_RAW_20:
 		*path_fmt  = 6;
-		*plain_fmt = 2;
 		break;
 	case CAM_FORMAT_DPCM_10_6_10:
 		*path_fmt  = 7;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_DPCM_10_8_10:
 		*path_fmt  = 8;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_DPCM_12_6_12:
 		*path_fmt  = 9;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_DPCM_12_8_12:
 		*path_fmt  = 0xA;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_DPCM_14_8_14:
 		*path_fmt  = 0xB;
-		*plain_fmt = 1;
 		break;
 	case CAM_FORMAT_DPCM_14_10_14:
 		*path_fmt  = 0xC;
-		*plain_fmt = 1;
 		break;
 	default:
-		pr_err("%s:%d:CSID:%d un supported format\n",
-		__func__, __LINE__, decode_fmt);
+		CAM_ERR(CAM_ISP, "CSID:%d un supported format",
+			input_fmt);
 		rc = -EINVAL;
 	}
 
 	return rc;
 }
 
+static int cam_ife_csid_get_rdi_format(uint32_t input_fmt,
+	uint32_t output_fmt, uint32_t *path_fmt, uint32_t *plain_fmt)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_ISP, "input format:%d output format:%d",
+		 input_fmt, output_fmt);
+
+	switch (output_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_DPCM_10_6_10:
+	case CAM_FORMAT_DPCM_10_8_10:
+	case CAM_FORMAT_DPCM_12_6_12:
+	case CAM_FORMAT_DPCM_12_8_12:
+	case CAM_FORMAT_DPCM_14_8_14:
+	case CAM_FORMAT_DPCM_14_10_14:
+		*path_fmt  = 0xF;
+		*plain_fmt = 0;
+		break;
+
+	case CAM_FORMAT_PLAIN8:
+		rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+		if (rc)
+			goto error;
+
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_PLAIN16_8:
+	case CAM_FORMAT_PLAIN16_10:
+	case CAM_FORMAT_PLAIN16_12:
+	case CAM_FORMAT_PLAIN16_14:
+	case CAM_FORMAT_PLAIN16_16:
+		rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+		if (rc)
+			goto error;
+
+		*plain_fmt = 1;
+		break;
+	case CAM_FORMAT_PLAIN32_20:
+		rc = cam_ife_csid_get_format(input_fmt, path_fmt);
+		if (rc)
+			goto error;
+
+		*plain_fmt = 2;
+		break;
+	default:
+		*path_fmt  = 0xF;
+		*plain_fmt = 0;
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "path format value:%d plain format value:%d",
+		 *path_fmt, *plain_fmt);
+
+	return 0;
+error:
+	return rc;
+
+}
+
+
 static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
 	struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
 	uint32_t res_type)
@@ -166,8 +211,8 @@
 
 	if (i == CAM_IFE_CSID_CID_RES_MAX) {
 		if (res_type == CAM_ISP_IFE_IN_RES_TPG) {
-			pr_err("%s:%d:CSID:%d TPG CID not available\n",
-				__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+			CAM_ERR(CAM_ISP, "CSID:%d TPG CID not available",
+				 csid_hw->hw_intf->hw_idx);
 			rc = -EINVAL;
 		}
 
@@ -182,8 +227,7 @@
 				csid_hw->cid_res[j].res_state =
 					CAM_ISP_RESOURCE_STATE_RESERVED;
 				*res = &csid_hw->cid_res[j];
-				CDBG("%s:%d:CSID:%d CID %d allocated\n",
-					__func__, __LINE__,
+				CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
 					csid_hw->hw_intf->hw_idx,
 					csid_hw->cid_res[j].res_id);
 				break;
@@ -191,8 +235,8 @@
 		}
 
 		if (j == CAM_IFE_CSID_CID_RES_MAX) {
-			pr_err("%s:%d:CSID:%d Free cid is not available\n",
-				__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+			CAM_ERR(CAM_ISP, "CSID:%d Free cid is not available",
+				 csid_hw->hw_intf->hw_idx);
 			rc = -EINVAL;
 		}
 	}
@@ -213,13 +257,13 @@
 	csid_reg = csid_hw->csid_info->csid_reg;
 
 	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		pr_err("%s:%d:CSID:%d Invalid HW State:%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d",
+			csid_hw->hw_intf->hw_idx,
 			csid_hw->hw_info->hw_state);
 		return -EINVAL;
 	}
 
-	CDBG("%s:%d:CSID:%d Csid reset\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d Csid reset",
 		csid_hw->hw_intf->hw_idx);
 
 	init_completion(&csid_hw->csid_top_complete);
@@ -287,14 +331,12 @@
 		soc_info->reg_map[0].mem_base +
 		csid_reg->cmn_reg->csid_rst_strobes_addr);
 
-	CDBG("%s:%d: Waiting for reset complete from irq handler\n",
-		__func__, __LINE__);
-
+	CAM_DBG(CAM_ISP, " Waiting for reset complete from irq handler");
 	rc = wait_for_completion_timeout(&csid_hw->csid_top_complete,
 		msecs_to_jiffies(IFE_CSID_TIMEOUT));
 	if (rc <= 0) {
-		pr_err("%s:%d:CSID:%d reset completion in fail rc = %d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+		CAM_ERR(CAM_ISP, "CSID:%d reset completion in fail rc = %d",
+			 csid_hw->hw_intf->hw_idx, rc);
 		if (rc == 0)
 			rc = -ETIMEDOUT;
 	} else {
@@ -331,26 +373,26 @@
 	res      = reset->node_res;
 
 	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		pr_err("%s:%d:CSID:%d Invalid hw state :%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid hw state :%d",
+			csid_hw->hw_intf->hw_idx,
 			csid_hw->hw_info->hw_state);
 		return -EINVAL;
 	}
 
 	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
-		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
 		rc = -EINVAL;
 		goto end;
 	}
 
-	CDBG("%s:%d:CSID:%d resource:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d resource:%d",
 		csid_hw->hw_intf->hw_idx, res->res_id);
 
 	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
 		if (!csid_reg->ipp_reg) {
-			pr_err("%s:%d:CSID:%d IPP not supported :%d\n",
-				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			CAM_ERR(CAM_ISP, "CSID:%d IPP not supported :%d",
+				 csid_hw->hw_intf->hw_idx,
 				res->res_id);
 			return -EINVAL;
 		}
@@ -368,8 +410,8 @@
 	} else {
 		id = res->res_id;
 		if (!csid_reg->rdi_reg[id]) {
-			pr_err("%s:%d:CSID:%d RDI res not supported :%d\n",
-				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			CAM_ERR(CAM_ISP, "CSID:%d RDI res not supported :%d",
+				 csid_hw->hw_intf->hw_idx,
 				res->res_id);
 			return -EINVAL;
 		}
@@ -401,8 +443,8 @@
 	rc = wait_for_completion_timeout(complete,
 		msecs_to_jiffies(IFE_CSID_TIMEOUT));
 	if (rc <= 0) {
-		pr_err("%s:%d CSID:%d Res id %d fail rc = %d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Res id %d fail rc = %d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_id,  rc);
 		if (rc == 0)
 			rc = -ETIMEDOUT;
@@ -423,8 +465,9 @@
 	int rc = 0;
 	struct cam_ife_csid_cid_data       *cid_data;
 
-	CDBG("%s:%d CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d\n",
-		__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+	CAM_DBG(CAM_ISP,
+		"CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d",
+		csid_hw->hw_intf->hw_idx,
 		cid_reserv->in_port->res_type,
 		cid_reserv->in_port->lane_type,
 		cid_reserv->in_port->lane_num,
@@ -432,8 +475,8 @@
 		cid_reserv->in_port->vc);
 
 	if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
-		pr_err("%s:%d:CSID:%d  Invalid phy sel %d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d  Invalid phy sel %d",
+			csid_hw->hw_intf->hw_idx,
 			cid_reserv->in_port->res_type);
 		rc = -EINVAL;
 		goto end;
@@ -441,8 +484,8 @@
 
 	if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
 		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
-		pr_err("%s:%d:CSID:%d  Invalid lane type %d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d  Invalid lane type %d",
+			csid_hw->hw_intf->hw_idx,
 			cid_reserv->in_port->lane_type);
 		rc = -EINVAL;
 		goto end;
@@ -451,8 +494,8 @@
 	if ((cid_reserv->in_port->lane_type ==  CAM_ISP_LANE_TYPE_DPHY &&
 		cid_reserv->in_port->lane_num > 4) &&
 		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
-		pr_err("%s:%d:CSID:%d Invalid lane num %d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid lane num %d",
+			csid_hw->hw_intf->hw_idx,
 			cid_reserv->in_port->lane_num);
 		rc = -EINVAL;
 		goto end;
@@ -460,8 +503,8 @@
 	if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
 		cid_reserv->in_port->lane_num > 3) &&
 		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
-		pr_err("%s:%d: CSID:%d Invalid lane type %d & num %d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, " CSID:%d Invalid lane type %d & num %d",
+			 csid_hw->hw_intf->hw_idx,
 			cid_reserv->in_port->lane_type,
 			cid_reserv->in_port->lane_num);
 		rc = -EINVAL;
@@ -471,8 +514,8 @@
 	/* CSID  CSI2 v2.0 supports 31 vc  */
 	if (cid_reserv->in_port->dt > 0x3f ||
 		cid_reserv->in_port->vc > 0x1f) {
-		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d",
+			csid_hw->hw_intf->hw_idx,
 			cid_reserv->in_port->vc, cid_reserv->in_port->dt);
 		rc = -EINVAL;
 		goto end;
@@ -481,8 +524,8 @@
 	if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
 		(cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 &&
 		cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
-		pr_err("%s:%d: CSID:%d Invalid tpg decode fmt %d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, " CSID:%d Invalid tpg decode fmt %d",
+			 csid_hw->hw_intf->hw_idx,
 			cid_reserv->in_port->format);
 		rc = -EINVAL;
 		goto end;
@@ -538,8 +581,7 @@
 			csid_hw->csi2_rx_cfg.phy_sel = 0;
 			if (cid_reserv->in_port->format >
 			    CAM_FORMAT_MIPI_RAW_16) {
-				pr_err("%s:%d: Wrong TPG format\n", __func__,
-					__LINE__);
+				CAM_ERR(CAM_ISP, " Wrong TPG format");
 				rc = -EINVAL;
 				goto end;
 			}
@@ -562,8 +604,9 @@
 		cid_reserv->node_res = &csid_hw->cid_res[0];
 		csid_hw->csi2_reserve_cnt++;
 
-		CDBG("%s:%d:CSID:%d CID :%d resource acquired successfully\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_DBG(CAM_ISP,
+			"CSID:%d CID :%d resource acquired successfully",
+			csid_hw->hw_intf->hw_idx,
 			cid_reserv->node_res->res_id);
 	} else {
 		rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
@@ -572,14 +615,13 @@
 		/* if success then increment the reserve count */
 		if (!rc) {
 			if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
-				pr_err("%s:%d:CSID%d reserve cnt reached max\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_ISP,
+					"CSID%d reserve cnt reached max",
 					csid_hw->hw_intf->hw_idx);
 				rc = -EINVAL;
 			} else {
 				csid_hw->csi2_reserve_cnt++;
-				CDBG("%s:%d:CSID:%d CID:%d acquired\n",
-					__func__, __LINE__,
+				CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired",
 					csid_hw->hw_intf->hw_idx,
 					cid_reserv->node_res->res_id);
 			}
@@ -601,8 +643,8 @@
 	/* CSID  CSI2 v2.0 supports 31 vc */
 	if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
 		(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
-		pr_err("%s:%d:CSID:%d Invalid vc:%d dt %d mode:%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d",
+			 csid_hw->hw_intf->hw_idx,
 			reserve->in_port->vc, reserve->in_port->dt,
 			reserve->sync_mode);
 		rc = -EINVAL;
@@ -613,8 +655,9 @@
 	case CAM_IFE_PIX_PATH_RES_IPP:
 		if (csid_hw->ipp_res.res_state !=
 			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
-			CDBG("%s:%d:CSID:%d IPP resource not available %d\n",
-				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			CAM_DBG(CAM_ISP,
+				"CSID:%d IPP resource not available %d",
+				csid_hw->hw_intf->hw_idx,
 				csid_hw->ipp_res.res_state);
 			rc = -EINVAL;
 			goto end;
@@ -622,8 +665,8 @@
 
 		if (cam_ife_csid_is_ipp_format_supported(
 				reserve->in_port->format)) {
-			pr_err("%s:%d:CSID:%d res id:%d un support format %d\n",
-				__func__, __LINE__,
+			CAM_ERR(CAM_ISP,
+				"CSID:%d res id:%d un support format %d",
 				csid_hw->hw_intf->hw_idx, reserve->res_id,
 				reserve->in_port->format);
 			rc = -EINVAL;
@@ -632,8 +675,8 @@
 
 		/* assign the IPP resource */
 		res = &csid_hw->ipp_res;
-		CDBG("%s:%d:CSID:%d IPP resource:%d acquired successfully\n",
-			__func__, __LINE__,
+		CAM_DBG(CAM_ISP,
+			"CSID:%d IPP resource:%d acquired successfully",
 			csid_hw->hw_intf->hw_idx, res->res_id);
 
 			break;
@@ -643,23 +686,24 @@
 	case CAM_IFE_PIX_PATH_RES_RDI_3:
 		if (csid_hw->rdi_res[reserve->res_id].res_state !=
 			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
-			CDBG("%s:%d:CSID:%d RDI:%d resource not available %d\n",
-				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			CAM_DBG(CAM_ISP,
+				"CSID:%d RDI:%d resource not available %d",
+				csid_hw->hw_intf->hw_idx,
 				reserve->res_id,
 				csid_hw->rdi_res[reserve->res_id].res_state);
 			rc = -EINVAL;
 			goto end;
 		} else {
 			res = &csid_hw->rdi_res[reserve->res_id];
-			CDBG("%s:%d:CSID:%d RDI resource:%d acquire success\n",
-				__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+			CAM_DBG(CAM_ISP,
+				"CSID:%d RDI resource:%d acquire success",
+				csid_hw->hw_intf->hw_idx,
 				res->res_id);
 		}
 
 		break;
 	default:
-		pr_err("%s:%d:CSID:%d Invalid res id:%d\n",
-			__func__, __LINE__,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res id:%d",
 			csid_hw->hw_intf->hw_idx, reserve->res_id);
 		rc = -EINVAL;
 		goto end;
@@ -668,6 +712,18 @@
 	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	path_data = (struct cam_ife_csid_path_cfg   *)res->res_priv;
 
+	/* store the output format for RDI */
+	switch (reserve->res_id) {
+	case CAM_IFE_PIX_PATH_RES_RDI_0:
+	case CAM_IFE_PIX_PATH_RES_RDI_1:
+	case CAM_IFE_PIX_PATH_RES_RDI_2:
+	case CAM_IFE_PIX_PATH_RES_RDI_3:
+		path_data->output_fmt = reserve->out_port->format;
+		break;
+	default:
+		break;
+	}
+
 	path_data->cid = reserve->cid;
 	path_data->decode_fmt = reserve->in_port->format;
 	path_data->master_idx = reserve->master_idx;
@@ -711,31 +767,30 @@
 
 	/* overflow check before increment */
 	if (csid_hw->hw_info->open_count == UINT_MAX) {
-		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+			csid_hw->hw_intf->hw_idx);
 		return -EINVAL;
 	}
 
 	/* Increment ref Count */
 	csid_hw->hw_info->open_count++;
 	if (csid_hw->hw_info->open_count > 1) {
-		CDBG("%s:%d: CSID hw has already been enabled\n",
-			__func__, __LINE__);
+		CAM_DBG(CAM_ISP, "CSID hw has already been enabled");
 		return rc;
 	}
 
-	CDBG("%s:%d:CSID:%d init CSID HW\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
 		csid_hw->hw_intf->hw_idx);
 
 	rc = cam_ife_csid_enable_soc_resources(soc_info);
 	if (rc) {
-		pr_err("%s:%d:CSID:%d Enable SOC failed\n", __func__, __LINE__,
+		CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed",
 			csid_hw->hw_intf->hw_idx);
 		goto err;
 	}
 
 
-	CDBG("%s:%d:CSID:%d enable top irq interrupt\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d enable top irq interrupt",
 		csid_hw->hw_intf->hw_idx);
 
 	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
@@ -745,8 +800,8 @@
 
 	rc = cam_ife_csid_global_reset(csid_hw);
 	if (rc) {
-		pr_err("%s:%d CSID:%d csid_reset fail rc = %d\n",
-			 __func__, __LINE__, csid_hw->hw_intf->hw_idx, rc);
+		CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+			  csid_hw->hw_intf->hw_idx, rc);
 		rc = -ETIMEDOUT;
 		goto disable_soc;
 	}
@@ -756,7 +811,7 @@
 	 * SW register reset also reset the mask irq, so poll the irq status
 	 * to check the reset complete.
 	 */
-	CDBG("%s:%d:CSID:%d Reset Software registers\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d Reset Software registers",
 			csid_hw->hw_intf->hw_idx);
 
 	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
@@ -768,8 +823,7 @@
 			status, (status & 0x1) == 0x1,
 		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
 	if (rc < 0) {
-		pr_err("%s:%d: software register reset timeout.....\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "software register reset timeout.....");
 		rc = -ETIMEDOUT;
 		goto disable_soc;
 	}
@@ -801,7 +855,7 @@
 
 	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
 			csid_reg->cmn_reg->csid_hw_version_addr);
-	CDBG("%s:%d:CSID:%d CSID HW version: 0x%x\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d CSID HW version: 0x%x",
 		csid_hw->hw_intf->hw_idx, val);
 
 	return 0;
@@ -830,7 +884,7 @@
 	soc_info = &csid_hw->hw_info->soc_info;
 	csid_reg = csid_hw->csid_info->csid_reg;
 
-	CDBG("%s:%d:CSID:%d De-init CSID HW\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d De-init CSID HW",
 		csid_hw->hw_intf->hw_idx);
 
 	/*disable the top IRQ interrupt */
@@ -839,8 +893,8 @@
 
 	rc = cam_ife_csid_disable_soc_resources(soc_info);
 	if (rc)
-		pr_err("%s:%d:CSID:%d Disable CSID SOC failed\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
+			csid_hw->hw_intf->hw_idx);
 
 	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
 	return rc;
@@ -856,8 +910,8 @@
 	csid_hw->tpg_start_cnt++;
 	if (csid_hw->tpg_start_cnt == 1) {
 		/*Enable the TPG */
-		CDBG("%s:%d CSID:%d start CSID TPG\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_DBG(CAM_ISP, "CSID:%d start CSID TPG",
+			csid_hw->hw_intf->hw_idx);
 
 		soc_info = &csid_hw->hw_info->soc_info;
 		{
@@ -865,44 +919,37 @@
 			uint32_t i;
 			uint32_t base = 0x600;
 
-			CDBG("%s:%d: ================== TPG ===============\n",
-				__func__, __LINE__);
+			CAM_DBG(CAM_ISP, "================ TPG ============");
 			for (i = 0; i < 16; i++) {
 				val = cam_io_r_mb(
 					soc_info->reg_map[0].mem_base +
 					base + i * 4);
-				CDBG("%s:%d reg 0x%x = 0x%x\n",
-					__func__, __LINE__,
+				CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
 					(base + i*4), val);
 			}
 
-			CDBG("%s:%d: ================== IPP ===============\n",
-				__func__, __LINE__);
+			CAM_DBG(CAM_ISP, "================ IPP =============");
 			base = 0x200;
 			for (i = 0; i < 10; i++) {
 				val = cam_io_r_mb(
 					soc_info->reg_map[0].mem_base +
 					base + i * 4);
-				CDBG("%s:%d reg 0x%x = 0x%x\n",
-					__func__, __LINE__,
+				CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
 					(base + i*4), val);
 			}
 
-			CDBG("%s:%d: ================== RX ===============\n",
-				__func__, __LINE__);
+			CAM_DBG(CAM_ISP, "================ RX =============");
 			base = 0x100;
 			for (i = 0; i < 5; i++) {
 				val = cam_io_r_mb(
 					soc_info->reg_map[0].mem_base +
 					base + i * 4);
-				CDBG("%s:%d reg 0x%x = 0x%x\n",
-					__func__, __LINE__,
+				CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
 					(base + i*4), val);
 			}
 		}
 
-		CDBG("%s:%d: =============== TPG control ===============\n",
-			__func__, __LINE__);
+		CAM_DBG(CAM_ISP, "============ TPG control ============");
 		val = (4 << 20);
 		val |= (0x80 << 8);
 		val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
@@ -912,8 +959,7 @@
 			csid_tpg_ctrl_addr);
 
 		val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
-		CDBG("%s:%d reg 0x%x = 0x%x\n", __func__, __LINE__,
-			0x600, val);
+		CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", 0x600, val);
 	}
 
 	return 0;
@@ -934,8 +980,8 @@
 
 	/* disable the TPG */
 	if (!csid_hw->tpg_start_cnt) {
-		CDBG("%s:%d CSID:%d stop CSID TPG\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_DBG(CAM_ISP, "CSID:%d stop CSID TPG",
+			csid_hw->hw_intf->hw_idx);
 
 		/*stop the TPG */
 		cam_io_w_mb(0,  soc_info->reg_map[0].mem_base +
@@ -956,8 +1002,8 @@
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
 
-	CDBG("%s:%d CSID:%d TPG config\n", __func__,
-		__LINE__, csid_hw->hw_intf->hw_idx);
+	CAM_DBG(CAM_ISP, "CSID:%d TPG config",
+		csid_hw->hw_intf->hw_idx);
 
 	/* configure one DT, infinite frames */
 	val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
@@ -1012,13 +1058,13 @@
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
-	CDBG("%s:%d CSID:%d count:%d config csi2 rx\n", __func__,
-		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+	CAM_DBG(CAM_ISP, "CSID:%d count:%d config csi2 rx",
+		csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
 
 	/* overflow check before increment */
 	if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
-		pr_err("%s:%d:CSID:%d Open count reached max\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+			csid_hw->hw_intf->hw_idx);
 		return -EINVAL;
 	}
 
@@ -1081,15 +1127,15 @@
 	struct cam_hw_soc_info              *soc_info;
 
 	if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
-		pr_err("%s:%d CSID:%d Invalid res id :%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res id :%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
 		return -EINVAL;
 	}
 
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
-	CDBG("%s:%d CSID:%d cnt : %d Disable csi2 rx\n", __func__,
-		__LINE__, csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+	CAM_DBG(CAM_ISP, "CSID:%d cnt : %d Disable csi2 rx",
+		csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
 
 	if (csid_hw->csi2_cfg_cnt)
 		csid_hw->csi2_cfg_cnt--;
@@ -1114,22 +1160,21 @@
 	struct cam_ife_csid_path_cfg           *path_data;
 	struct cam_ife_csid_reg_offset         *csid_reg;
 	struct cam_hw_soc_info                 *soc_info;
-	uint32_t path_format = 0, plain_format = 0, val = 0;
+	uint32_t path_format = 0, val = 0;
 
 	path_data = (struct cam_ife_csid_path_cfg  *) res->res_priv;
 	csid_reg = csid_hw->csid_info->csid_reg;
 	soc_info = &csid_hw->hw_info->soc_info;
 
 	if (!csid_reg->ipp_reg) {
-		pr_err("%s:%d CSID:%d IPP:%d is not supported on HW\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d IPP:%d is not supported on HW",
+			csid_hw->hw_intf->hw_idx,
 			res->res_id);
 		return -EINVAL;
 	}
 
-	CDBG("%s:%d: Enabled IPP Path.......\n", __func__, __LINE__);
-	rc = cam_ife_csid_get_format(res->res_id,
-		path_data->decode_fmt, &path_format, &plain_format);
+	CAM_DBG(CAM_ISP, "Enabled IPP Path.......");
+	rc = cam_ife_csid_get_format(path_data->decode_fmt, &path_format);
 	if (rc)
 		return rc;
 
@@ -1232,15 +1277,16 @@
 	soc_info = &csid_hw->hw_info->soc_info;
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
-		pr_err("%s:%d:CSID:%d Res type %d res_id:%d in wrong state %d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP,
+			"CSID:%d Res type %d res_id:%d in wrong state %d",
+			csid_hw->hw_intf->hw_idx,
 			res->res_type, res->res_id, res->res_state);
 		rc = -EINVAL;
 	}
 
 	if (!csid_reg->ipp_reg) {
-		pr_err("%s:%d:CSID:%d IPP %d is not supported on HW\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d IPP %d is not supported on HW",
+			csid_hw->hw_intf->hw_idx,
 			res->res_id);
 		rc = -EINVAL;
 	}
@@ -1270,20 +1316,21 @@
 	soc_info = &csid_hw->hw_info->soc_info;
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
-		pr_err("%s:%d:CSID:%d res type:%d res_id:%d Invalid state%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP,
+			"CSID:%d res type:%d res_id:%d Invalid state%d",
+			csid_hw->hw_intf->hw_idx,
 			res->res_type, res->res_id, res->res_state);
 		return -EINVAL;
 	}
 
 	if (!csid_reg->ipp_reg) {
-		pr_err("%s:%d:CSID:%d IPP %d not supported on HW\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d IPP %d not supported on HW",
+			csid_hw->hw_intf->hw_idx,
 			res->res_id);
 		return -EINVAL;
 	}
 
-	CDBG("%s:%d: enable IPP path.......\n", __func__, __LINE__);
+	CAM_DBG(CAM_ISP, "enable IPP path.......");
 
 	/*Resume at frame boundary */
 	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
@@ -1300,8 +1347,7 @@
 	/* for slave mode, not need to resume for slave device */
 
 	/* Enable the required ipp interrupts */
-	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
-		CSID_PATH_INFO_INPUT_SOF|CSID_PATH_INFO_INPUT_EOF;
+	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
 
@@ -1326,40 +1372,40 @@
 	soc_info = &csid_hw->hw_info->soc_info;
 
 	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
-		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
 		return -EINVAL;
 	}
 
 	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_id, res->res_state);
 		return rc;
 	}
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
-		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid state%d",
+			csid_hw->hw_intf->hw_idx, res->res_id,
 			res->res_state);
 		return -EINVAL;
 	}
 
 	if (!csid_reg->ipp_reg) {
-		pr_err("%s:%d:CSID:%d IPP%d is not supported on HW\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		CAM_ERR(CAM_ISP, "CSID:%d IPP%d is not supported on HW",
+			csid_hw->hw_intf->hw_idx, res->res_id);
 		return -EINVAL;
 	}
 
 	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
 		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
-		pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+		CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+			csid_hw->hw_intf->hw_idx, stop_cmd);
 		return -EINVAL;
 	}
 
-	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
 		csid_hw->hw_intf->hw_idx, res->res_id);
 
 	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
@@ -1411,13 +1457,13 @@
 
 	id = res->res_id;
 	if (!csid_reg->rdi_reg[id]) {
-		pr_err("%s:%d CSID:%d RDI:%d is not supported on HW\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx, id);
+		CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW",
+			 csid_hw->hw_intf->hw_idx, id);
 		return -EINVAL;
 	}
 
-	rc = cam_ife_csid_get_format(res->res_id,
-		path_data->decode_fmt, &path_format, &plain_fmt);
+	rc = cam_ife_csid_get_rdi_format(path_data->decode_fmt,
+		path_data->output_fmt, &path_format, &plain_fmt);
 	if (rc)
 		return rc;
 
@@ -1514,8 +1560,8 @@
 	if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
 		res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		!csid_reg->rdi_reg[id]) {
-		pr_err("%s:%d:CSID:%d Invalid res id%d state:%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res id%d state:%d",
+			csid_hw->hw_intf->hw_idx, res->res_id,
 			res->res_state);
 		return -EINVAL;
 	}
@@ -1546,8 +1592,9 @@
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
 		!csid_reg->rdi_reg[id]) {
-		pr_err("%s:%d:CSID:%d invalid res type:%d res_id:%d state%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP,
+			"CSID:%d invalid res type:%d res_id:%d state%d",
+			csid_hw->hw_intf->hw_idx,
 			res->res_type, res->res_id, res->res_state);
 		return -EINVAL;
 	}
@@ -1558,8 +1605,7 @@
 			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
 
 	/* Enable the required RDI interrupts */
-	val = (CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW|
-		CSID_PATH_INFO_INPUT_SOF | CSID_PATH_INFO_INPUT_EOF);
+	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
 
@@ -1585,35 +1631,35 @@
 
 	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX ||
 		!csid_reg->rdi_reg[res->res_id]) {
-		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
 		return -EINVAL;
 	}
 
 	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_id, res->res_state);
 		return rc;
 	}
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
-		CDBG("%s:%d:CSID:%d Res:%d Invalid res_state%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid res_state%d",
+			csid_hw->hw_intf->hw_idx, res->res_id,
 			res->res_state);
 		return -EINVAL;
 	}
 
 	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
 		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
-		pr_err("%s:%d:CSID:%d un supported stop command:%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, stop_cmd);
+		CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+			csid_hw->hw_intf->hw_idx, stop_cmd);
 		return -EINVAL;
 	}
 
 
-	CDBG("%s:%d CSID:%d res_id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
 		csid_hw->hw_intf->hw_idx, res->res_id);
 
 	init_completion(&csid_hw->csid_rdin_complete[id]);
@@ -1654,15 +1700,15 @@
 
 	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
 		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
-		CDBG("%s:%d:CSID:%d Invalid res_type:%d res id%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
 			res->res_id);
 		return -EINVAL;
 	}
 
 	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
-		pr_err("%s:%d:CSID:%d Invalid dev state :%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+			csid_hw->hw_intf->hw_idx,
 			csid_hw->hw_info->hw_state);
 		return -EINVAL;
 	}
@@ -1706,22 +1752,22 @@
 	soc_info = &csid_hw->hw_info->soc_info;
 
 	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
-		CDBG("%s:%d:CSID:%d Invalid res id%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id);
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
 		return -EINVAL;
 	}
 
 	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
 		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CDBG("%s:%d:CSID:%d Res:%d already in stopped state:%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_id, res->res_state);
 		return rc;
 	}
 
 	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
-		CDBG("%s:%d:CSID:%d Res:%d Invalid state%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_id,
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d Invalid state%d",
+			csid_hw->hw_intf->hw_idx, res->res_id,
 			res->res_state);
 		return -EINVAL;
 	}
@@ -1734,8 +1780,8 @@
 	rc = wait_for_completion_timeout(complete,
 		msecs_to_jiffies(IFE_CSID_TIMEOUT));
 	if (rc <= 0) {
-		pr_err("%s:%d:CSID%d stop at frame boundary failid:%drc:%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID%d stop at frame boundary failid:%drc:%d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_id, rc);
 		if (rc == 0)
 			/* continue even have timeout */
@@ -1774,7 +1820,7 @@
 	struct cam_ife_csid_reg_offset  *csid_reg;
 
 	if (!hw_priv || !get_hw_cap_args) {
-		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
 		return -EINVAL;
 	}
 
@@ -1789,8 +1835,9 @@
 	hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
 	hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
 
-	CDBG("%s:%d:CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d\n",
-		__func__, __LINE__, csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
+	CAM_DBG(CAM_ISP,
+		"CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d",
+		csid_hw->hw_intf->hw_idx, hw_caps->no_rdis,
 		hw_caps->no_pix, hw_caps->major_version, hw_caps->minor_version,
 		hw_caps->version_incr);
 
@@ -1807,7 +1854,7 @@
 
 	if (!hw_priv || !reset_args || (arg_size !=
 		sizeof(struct cam_csid_reset_cfg_args))) {
-		pr_err("%s:%d:CSID:Invalid args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID:Invalid args");
 		return -EINVAL;
 	}
 
@@ -1823,8 +1870,8 @@
 		rc = cam_ife_csid_path_reset(csid_hw, reset);
 		break;
 	default:
-		pr_err("%s:%d:CSID:Invalid reset type :%d\n", __func__,
-			__LINE__, reset->reset_type);
+		CAM_ERR(CAM_ISP, "CSID:Invalid reset type :%d",
+			reset->reset_type);
 		rc = -EINVAL;
 		break;
 	}
@@ -1842,7 +1889,7 @@
 
 	if (!hw_priv || !reserve_args || (arg_size !=
 		sizeof(struct cam_csid_hw_reserve_resource_args))) {
-		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
 		return -EINVAL;
 	}
 
@@ -1859,8 +1906,8 @@
 		rc = cam_ife_csid_path_reserve(csid_hw, reserv);
 		break;
 	default:
-		pr_err("%s:%d:CSID:%d Invalid res type :%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, reserv->res_type);
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type :%d",
+			csid_hw->hw_intf->hw_idx, reserv->res_type);
 		rc = -EINVAL;
 		break;
 	}
@@ -1879,7 +1926,7 @@
 
 	if (!hw_priv || !release_args ||
 		(arg_size != sizeof(struct cam_isp_resource_node))) {
-		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
 		return -EINVAL;
 	}
 
@@ -1892,31 +1939,33 @@
 		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
 		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
 		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
-		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
 			res->res_id);
 		rc = -EINVAL;
 		goto end;
 	}
 
 	if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
-		CDBG("%s:%d:CSID:%d res type:%d Res %d  in released state\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_DBG(CAM_ISP,
+			"CSID:%d res type:%d Res %d  in released state",
+			csid_hw->hw_intf->hw_idx,
 			res->res_type, res->res_id);
 		goto end;
 	}
 
 	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
 		res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CDBG("%s:%d:CSID:%d res type:%d Res id:%d invalid state:%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_DBG(CAM_ISP,
+			"CSID:%d res type:%d Res id:%d invalid state:%d",
+			csid_hw->hw_intf->hw_idx,
 			res->res_type, res->res_id, res->res_state);
 		rc = -EINVAL;
 		goto end;
 	}
 
-	CDBG("%s:%d:CSID:%d res type :%d Resource id:%d\n", __func__, __LINE__,
-			csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+	CAM_DBG(CAM_ISP, "CSID:%d res type :%d Resource id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
 
 	switch (res->res_type) {
 	case CAM_ISP_RESOURCE_CID:
@@ -1934,8 +1983,8 @@
 			memset(&csid_hw->csi2_rx_cfg, 0,
 				sizeof(struct cam_ife_csid_csi2_rx_cfg));
 
-		CDBG("%s:%d:CSID:%d res id :%d cnt:%d reserv cnt:%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
 
 		break;
@@ -1943,8 +1992,8 @@
 		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 		break;
 	default:
-		pr_err("%s:%d:CSID:%d Invalid res type:%d res id%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id:%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
 			res->res_id);
 		rc = -EINVAL;
 		break;
@@ -1966,7 +2015,7 @@
 
 	if (!hw_priv || !init_args ||
 		(arg_size != sizeof(struct cam_isp_resource_node))) {
-		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
 		return -EINVAL;
 	}
 
@@ -1980,8 +2029,8 @@
 		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
 		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
 		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
-		pr_err("%s:%d:CSID:%d Invalid res tpe:%d res id%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id:%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
 			res->res_id);
 		rc = -EINVAL;
 		goto end;
@@ -1990,14 +2039,15 @@
 
 	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
 		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
-		pr_err("%s:%d:CSID:%d res type:%d res_id:%dInvalid state %d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP,
+			"CSID:%d res type:%d res_id:%d Invalid state %d",
+			csid_hw->hw_intf->hw_idx,
 			res->res_type, res->res_id, res->res_state);
 		rc = -EINVAL;
 		goto end;
 	}
 
-	CDBG("%s:%d CSID:%d res type :%d res_id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d",
 		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
 
 
@@ -2018,8 +2068,8 @@
 
 		break;
 	default:
-		pr_err("%s:%d:CSID:%d Invalid res type state %d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type %d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_type);
 		break;
 	}
@@ -2041,7 +2091,7 @@
 
 	if (!hw_priv || !deinit_args ||
 		(arg_size != sizeof(struct cam_isp_resource_node))) {
-		pr_err("%s:%d:CSID:Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID:Invalid arguments");
 		return -EINVAL;
 	}
 
@@ -2051,8 +2101,8 @@
 
 	mutex_lock(&csid_hw->hw_info->hw_mutex);
 	if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CDBG("%s:%d:CSID:%d Res:%d already in De-init state\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_id);
 		goto end;
 	}
@@ -2069,8 +2119,8 @@
 
 		break;
 	default:
-		pr_err("%s:%d:CSID:%d Invalid Res type %d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_type);
 		goto end;
 	}
@@ -2094,7 +2144,7 @@
 
 	if (!hw_priv || !start_args ||
 		(arg_size != sizeof(struct cam_isp_resource_node))) {
-		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
 		return -EINVAL;
 	}
 
@@ -2103,19 +2153,18 @@
 	res = (struct cam_isp_resource_node *)start_args;
 	csid_reg = csid_hw->csid_info->csid_reg;
 
-	mutex_lock(&csid_hw->hw_info->hw_mutex);
 	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
 		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
 		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
 		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
-		CDBG("%s:%d:CSID:%d Invalid res tpe:%d res id:%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, res->res_type,
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res type:%d res id:%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
 			res->res_id);
 		rc = -EINVAL;
 		goto end;
 	}
 
-	CDBG("%s:%d CSID:%d res_type :%d res_id:%d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res_id:%d",
 		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
 
 	switch (res->res_type) {
@@ -2130,13 +2179,12 @@
 			rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
 		break;
 	default:
-		pr_err("%s:%d:CSID:%d Invalid res type%d\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx,
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d",
+			 csid_hw->hw_intf->hw_idx,
 			res->res_type);
 		break;
 	}
 end:
-	mutex_unlock(&csid_hw->hw_info->hw_mutex);
 	return rc;
 }
 
@@ -2152,14 +2200,13 @@
 
 	if (!hw_priv || !stop_args ||
 		(arg_size != sizeof(struct cam_csid_hw_stop_args))) {
-		pr_err("%s:%d:CSID: Invalid args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
 		return -EINVAL;
 	}
 	csid_stop = (struct cam_csid_hw_stop_args  *) stop_args;
 	csid_hw_info = (struct cam_hw_info  *)hw_priv;
 	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
 
-	mutex_lock(&csid_hw->hw_info->hw_mutex);
 	/* Stop the resource first */
 	for (i = 0; i < csid_stop->num_res; i++) {
 		res = csid_stop->node_res[i];
@@ -2178,8 +2225,8 @@
 
 			break;
 		default:
-			pr_err("%s:%d:CSID:%d Invalid res type%d\n", __func__,
-				__LINE__, csid_hw->hw_intf->hw_idx,
+			CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d",
+				csid_hw->hw_intf->hw_idx,
 				res->res_type);
 			break;
 		}
@@ -2191,9 +2238,10 @@
 		if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
 			csid_stop->stop_cmd == CAM_CSID_HALT_AT_FRAME_BOUNDARY)
 			rc = cam_ife_csid_res_wait_for_halt(csid_hw, res);
+		else
+			res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
 	}
 
-	mutex_unlock(&csid_hw->hw_info->hw_mutex);
 	return rc;
 
 }
@@ -2201,7 +2249,7 @@
 static int cam_ife_csid_read(void *hw_priv,
 	void *read_args, uint32_t arg_size)
 {
-	pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+	CAM_ERR(CAM_ISP, "CSID: unsupported");
 
 	return -EINVAL;
 }
@@ -2209,7 +2257,7 @@
 static int cam_ife_csid_write(void *hw_priv,
 	void *write_args, uint32_t arg_size)
 {
-	pr_err("%s:%d:CSID: un supported\n", __func__, __LINE__);
+	CAM_ERR(CAM_ISP, "CSID: unsupported");
 	return -EINVAL;
 }
 
@@ -2221,7 +2269,7 @@
 	struct cam_hw_info                   *csid_hw_info;
 
 	if (!hw_priv || !cmd_args) {
-		pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
 		return -EINVAL;
 	}
 
@@ -2233,8 +2281,8 @@
 		rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
 		break;
 	default:
-		pr_err("%s:%d:CSID:%d un supported cmd:%d\n", __func__,
-			__LINE__, csid_hw->hw_intf->hw_idx, cmd_type);
+		CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
+			csid_hw->hw_intf->hw_idx, cmd_type);
 		rc = -EINVAL;
 		break;
 	}
@@ -2253,11 +2301,10 @@
 
 	csid_hw = (struct cam_ife_csid_hw *)data;
 
-	CDBG("%s:%d:CSID %d IRQ Handling\n", __func__, __LINE__,
-		csid_hw->hw_intf->hw_idx);
+	CAM_DBG(CAM_ISP, "CSID %d IRQ Handling", csid_hw->hw_intf->hw_idx);
 
 	if (!data) {
-		pr_err("%s:%d:CSID: Invalid arguments\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
 		return IRQ_HANDLED;
 	}
 
@@ -2296,55 +2343,52 @@
 	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
 		csid_reg->cmn_reg->csid_irq_cmd_addr);
 
-	CDBG("%s:%d: irq_status_rx = 0x%x\n", __func__, __LINE__,
-		irq_status_rx);
-	CDBG("%s:%d: irq_status_ipp = 0x%x\n", __func__, __LINE__,
-		irq_status_ipp);
+	CAM_DBG(CAM_ISP, "irq_status_rx = 0x%x", irq_status_rx);
+	CAM_DBG(CAM_ISP, "irq_status_ipp = 0x%x", irq_status_ipp);
 
 	if (irq_status_top) {
-		CDBG("%s:%d: CSID global reset complete......Exit\n",
-			__func__, __LINE__);
+		CAM_DBG(CAM_ISP, "CSID global reset complete......Exit");
 		complete(&csid_hw->csid_top_complete);
 		return IRQ_HANDLED;
 	}
 
 
 	if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
-		CDBG("%s:%d: csi rx reset complete\n", __func__, __LINE__);
+		CAM_DBG(CAM_ISP, "csi rx reset complete");
 		complete(&csid_hw->csid_csi2_complete);
 	}
 
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
-		pr_err_ratelimited("%s:%d:CSID:%d lane 0 over flow\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 FIFO overflow",
+			csid_hw->hw_intf->hw_idx);
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
-		pr_err_ratelimited("%s:%d:CSID:%d lane 1 over flow\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 FIFO overflow",
+			csid_hw->hw_intf->hw_idx);
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
-		pr_err_ratelimited("%s:%d:CSID:%d lane 2 over flow\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 FIFO overflow",
+			csid_hw->hw_intf->hw_idx);
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
-		pr_err_ratelimited("%s:%d:CSID:%d lane 3 over flow\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 FIFO overflow",
+			csid_hw->hw_intf->hw_idx);
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
-		pr_err_ratelimited("%s:%d:CSID:%d TG OVER  FLOW\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG FIFO overflow",
+			csid_hw->hw_intf->hw_idx);
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
-		pr_err_ratelimited("%s:%d:CSID:%d CPHY_EOT_RECEPTION\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_EOT_RECEPTION",
+			csid_hw->hw_intf->hw_idx);
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
-		pr_err_ratelimited("%s:%d:CSID:%d CPHY_SOT_RECEPTION\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_SOT_RECEPTION",
+			csid_hw->hw_intf->hw_idx);
 	}
 	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
-		pr_err_ratelimited("%s:%d:CSID:%d CPHY_PH_CRC\n",
-			__func__, __LINE__, csid_hw->hw_intf->hw_idx);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PH_CRC",
+			csid_hw->hw_intf->hw_idx);
 	}
 
 	/*read the IPP errors */
@@ -2352,25 +2396,23 @@
 		/* IPP reset done bit */
 		if (irq_status_ipp &
 			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
-			CDBG("%s%d: CSID IPP reset complete\n",
-				__func__, __LINE__);
+			CAM_DBG(CAM_ISP, "CSID IPP reset complete");
 			complete(&csid_hw->csid_ipp_complete);
 		}
 		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
-			CDBG("%s: CSID IPP SOF received\n", __func__);
+			CAM_DBG(CAM_ISP, "CSID IPP SOF received");
 		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
-			CDBG("%s: CSID IPP SOL received\n", __func__);
+			CAM_DBG(CAM_ISP, "CSID IPP SOL received");
 		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
-			CDBG("%s: CSID IPP EOL received\n", __func__);
+			CAM_DBG(CAM_ISP, "CSID IPP EOL received");
 		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
-			CDBG("%s: CSID IPP EOF received\n", __func__);
+			CAM_DBG(CAM_ISP, "CSID IPP EOF received");
 
 		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
 			complete(&csid_hw->csid_ipp_complete);
 
 		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
-			pr_err("%s:%d:CSID:%d IPP fifo over flow\n",
-				__func__, __LINE__,
+			CAM_ERR(CAM_ISP, "CSID:%d IPP fifo overflow",
 				csid_hw->hw_intf->hw_idx);
 			/*Stop IPP path immediately */
 			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2382,17 +2424,20 @@
 	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
 		if (irq_status_rdi[i] &
 			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
-			CDBG("%s:%d: CSID rdi%d reset complete\n",
-				__func__, __LINE__, i);
+			CAM_DBG(CAM_ISP, "CSID rdi%d reset complete", i);
 			complete(&csid_hw->csid_rdin_complete[i]);
 		}
 
+		if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF)
+			CAM_DBG(CAM_ISP, "CSID RDI SOF received");
+		if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
+			CAM_DBG(CAM_ISP, "CSID RDI EOF received");
+
 		if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
 			complete(&csid_hw->csid_rdin_complete[i]);
 
 		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
-			pr_err("%s:%d:CSID:%d RDI fifo over flow\n",
-				__func__, __LINE__,
+			CAM_ERR(CAM_ISP, "CSID:%d RDI fifo overflow",
 				csid_hw->hw_intf->hw_idx);
 			/*Stop RDI path immediately */
 			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
@@ -2401,7 +2446,7 @@
 		}
 	}
 
-	CDBG("%s:%d:IRQ Handling exit\n", __func__, __LINE__);
+	CAM_DBG(CAM_ISP, "IRQ Handling exit");
 	return IRQ_HANDLED;
 }
 
@@ -2416,8 +2461,7 @@
 	struct cam_ife_csid_hw               *ife_csid_hw = NULL;
 
 	if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
-		pr_err("%s:%d: Invalid csid index:%d\n", __func__, __LINE__,
-			csid_idx);
+		CAM_ERR(CAM_ISP, "Invalid csid index:%d", csid_idx);
 		return rc;
 	}
 
@@ -2427,7 +2471,7 @@
 	ife_csid_hw->hw_intf = csid_hw_intf;
 	ife_csid_hw->hw_info = csid_hw_info;
 
-	CDBG("%s:%d: type %d index %d\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "type %d index %d",
 		ife_csid_hw->hw_intf->hw_type, csid_idx);
 
 
@@ -2446,8 +2490,7 @@
 	rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
 			cam_ife_csid_irq, ife_csid_hw);
 	if (rc < 0) {
-		pr_err("%s:%d:CSID:%d Failed to init_soc\n", __func__, __LINE__,
-			csid_idx);
+		CAM_ERR(CAM_ISP, "CSID:%d Failed to init_soc", csid_idx);
 		goto err;
 	}
 
@@ -2539,7 +2582,7 @@
 	uint32_t i;
 
 	if (!ife_csid_hw) {
-		pr_err("%s:%d: Invalid param\n", __func__, __LINE__);
+		CAM_ERR(CAM_ISP, "Invalid param");
 		return rc;
 	}
 
@@ -2553,8 +2596,7 @@
 	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
 		kfree(ife_csid_hw->cid_res[i].res_priv);
 
+	cam_ife_csid_deinit_soc_resources(&ife_csid_hw->hw_info->soc_info);
 
 	return 0;
 }
-
-
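The cam_ife_csid_core.c conversion above replaces open-coded pr_err()/CDBG()
calls, each carrying its own "%s:%d"/__func__/__LINE__ decoration, with the
CAM_ERR()/CAM_DBG()/CAM_ERR_RATE_LIMIT() helpers from cam_debug_util.h and tags
every message with the CAM_ISP module. cam_debug_util.h itself is outside these
hunks; the sketch below only illustrates the assumed shape of those helpers,
not the actual implementation in the tree:

	/* Minimal sketch, assuming the helpers wrap the standard printk
	 * macros: the module tag becomes a message prefix and call-site
	 * context is added centrally rather than at every call site. */
	#define CAM_DBG(__module, fmt, args...)				\
		pr_debug("CAM_DBG: %s: %s: %d: " fmt "\n",		\
			#__module, __func__, __LINE__, ##args)

	#define CAM_ERR(__module, fmt, args...)				\
		pr_err("CAM_ERR: %s: %s: %d: " fmt "\n",		\
			#__module, __func__, __LINE__, ##args)

	#define CAM_ERR_RATE_LIMIT(__module, fmt, args...)		\
		pr_err_ratelimited("CAM_ERR: %s: %s: %d: " fmt "\n",	\
			#__module, __func__, __LINE__, ##args)
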
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index 60e184b..ef585c3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -354,6 +354,7 @@
  * @dt :            Data type number
  * @cid             cid number, it is same as DT_ID number in HW
  * @decode_fmt:     input decode format
+ * @output_fmt:     output resource format, needed for RDI resource
  * @crop_enable:    crop is enable or disabled, if enabled
  *                  then remaining parameters are valid.
  * @start_pixel:    start pixel
@@ -373,6 +374,7 @@
 	uint32_t                        dt;
 	uint32_t                        cid;
 	uint32_t                        decode_fmt;
+	uint32_t                        output_fmt;
 	bool                            crop_enable;
 	uint32_t                        start_pixel;
 	uint32_t                        width;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
index 003d83f..5a57046 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
@@ -16,9 +16,7 @@
 #include "cam_ife_csid_core.h"
 #include "cam_ife_csid_dev.h"
 #include "cam_ife_csid_hw_intf.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_RES_MAX] = {
 	0, 0, 0, 0};
@@ -34,7 +32,7 @@
 	uint32_t                        csid_dev_idx;
 	int                             rc = 0;
 
-	CDBG("%s:%d probe called\n", __func__, __LINE__);
+	CAM_DBG(CAM_ISP, "probe called");
 
 	csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
 	if (!csid_hw_intf) {
@@ -60,8 +58,7 @@
 	match_dev = of_match_device(pdev->dev.driver->of_match_table,
 		&pdev->dev);
 	if (!match_dev) {
-		pr_err("%s:%d No matching table for the IFE CSID HW!\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ISP, "No matching table for the IFE CSID HW!");
 		rc = -EINVAL;
 		goto free_dev;
 	}
@@ -72,6 +69,7 @@
 
 	csid_hw_info->core_info = csid_dev;
 	csid_hw_info->soc_info.pdev = pdev;
+	csid_hw_info->soc_info.index = csid_dev_idx;
 
 	csid_hw_data = (struct cam_ife_csid_hw_info  *)match_dev->data;
 	/* need to setup the pdev before call the ife hw probe init */
@@ -82,7 +80,7 @@
 		goto free_dev;
 
 	platform_set_drvdata(pdev, csid_dev);
-	CDBG("%s:%d CSID:%d probe successful\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d probe successful",
 		csid_hw_intf->hw_idx);
 
 
@@ -113,7 +111,7 @@
 	csid_hw_intf = csid_dev->hw_intf;
 	csid_hw_info = csid_dev->hw_info;
 
-	CDBG("%s:%d CSID:%d remove\n", __func__, __LINE__,
+	CAM_DBG(CAM_ISP, "CSID:%d remove",
 		csid_dev->hw_intf->hw_idx);
 
 	cam_ife_csid_hw_deinit(csid_dev);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
index 4ed4da5..36c6df0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.c
@@ -15,7 +15,6 @@
 #include "cam_ife_csid_core.h"
 #include "cam_ife_csid_dev.h"
 
-
 #define CAM_CSID_LITE_DRV_NAME                    "csid_lite_170"
 #define CAM_CSID_LITE_VERSION_V170                 0x10070000
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index c718bba..72050aa 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -9,11 +9,10 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
+#include <linux/slab.h>
 #include "cam_ife_csid_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
 
 static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info)
 {
@@ -50,6 +49,14 @@
 	irq_handler_t csid_irq_handler, void *irq_data)
 {
 	int rc = 0;
+	struct cam_cpas_register_params   cpas_register_param;
+	struct cam_csid_soc_private      *soc_private;
+
+	soc_private = kzalloc(sizeof(struct cam_csid_soc_private), GFP_KERNEL);
+	if (!soc_private)
+		return -ENOMEM;
+
+	soc_info->soc_private = soc_private;
 
 	rc = cam_ife_csid_get_dt_properties(soc_info);
 	if (rc < 0)
@@ -59,36 +66,117 @@
 
 	rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler,
 		irq_data);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP,
+			"Error! Request platform resources failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "csid",
+		CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = &soc_info->pdev->dev;
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+		goto release_soc;
+	} else {
+		soc_private->cpas_handle = cpas_register_param.client_handle;
+	}
+
+	return rc;
+
+release_soc:
+	cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+	kfree(soc_private);
+
+	return rc;
+}
+
+int cam_ife_csid_deinit_soc_resources(
+	struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error! soc_private NULL");
+		return -ENODEV;
+	}
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
 	if (rc < 0)
 		return rc;
 
-	CDBG("%s: mem_base is 0x%llx\n", __func__,
-		(uint64_t) soc_info->reg_map[0].mem_base);
-
 	return rc;
 }
 
 int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info *soc_info)
 {
 	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+
+	soc_private = soc_info->soc_private;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = 640000000;
+	axi_vote.uncompressed_bw = 640000000;
+
+	CAM_DBG(CAM_ISP, "csid vote compressed_bw:%lld uncompressed_bw:%lld",
+		axi_vote.compressed_bw, axi_vote.uncompressed_bw);
+
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error CPAS start failed");
+		rc = -EFAULT;
+		goto end;
+	}
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_TURBO_VOTE, true);
 	if (rc) {
-		pr_err("%s: enable platform failed\n", __func__);
-		return rc;
+		CAM_ERR(CAM_ISP, "enable platform failed");
+		goto stop_cpas;
 	}
 
 	return rc;
+
+stop_cpas:
+	cam_cpas_stop(soc_private->cpas_handle);
+end:
+	return rc;
 }
 
 int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
 {
 	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
 
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc)
-		pr_err("%s: Disable platform failed\n", __func__);
+		CAM_ERR(CAM_ISP, "Disable platform failed");
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc);
+		return rc;
+	}
 
 	return rc;
 }
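With the cam_ife_csid_soc.c change above, the CSID SOC layer becomes a CPAS
client: cam_ife_csid_init_soc_resources() registers the client after claiming
platform resources, cam_ife_csid_enable_soc_resources() places the AHB/AXI
votes via cam_cpas_start() before turning on clocks and regulators,
cam_ife_csid_disable_soc_resources() undoes that in reverse order, and the new
cam_ife_csid_deinit_soc_resources() unregisters the client on teardown. A
condensed sketch of that pairing as a caller would drive it (the wrapper
function below is illustrative only; the real driver calls these helpers from
the CSID hw init/deinit and start/stop paths):

	/* Illustrative pairing of the four SOC helpers touched above. */
	static int csid_soc_power_cycle_sketch(struct cam_hw_soc_info *soc_info,
		irq_handler_t irq_handler, void *irq_data)
	{
		int rc;

		/* probe: DT parse, platform resources, CPAS register */
		rc = cam_ife_csid_init_soc_resources(soc_info,
			irq_handler, irq_data);
		if (rc)
			return rc;

		/* start: CPAS AHB/AXI vote first, then clocks/regulators */
		rc = cam_ife_csid_enable_soc_resources(soc_info);
		if (!rc)
			/* stop: clocks/regulators off, then CPAS stop */
			rc = cam_ife_csid_disable_soc_resources(soc_info);

		/* remove: CPAS unregister, release platform resources */
		cam_ife_csid_deinit_soc_resources(soc_info);
		return rc;
	}
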
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
index 218e05a..1a30722 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -15,6 +15,19 @@
 
 #include "cam_isp_hw.h"
 
+/*
+ * struct cam_csid_soc_private:
+ *
+ * @Brief:                   Private SOC data specific to CSID HW Driver
+ *
+ * @cpas_handle:             Handle returned on registering with CPAS driver.
+ *                           This handle is used for all further interface
+ *                           with CPAS.
+ */
+struct cam_csid_soc_private {
+	uint32_t cpas_handle;
+};
+
 /**
  * struct csid_device_soc_info - CSID SOC info object
  *
@@ -38,6 +51,17 @@
 int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
 	irq_handler_t csid_irq_handler, void *irq_data);
 
+
+/**
+ * cam_ife_csid_deinit_soc_resources()
+ *
+ * @brief:                 csid de-initialization function for the soc info
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
 /**
  * cam_ife_csid_enable_soc_resources()
  *
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
index ecc6f0e..52b712a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -69,6 +69,7 @@
  *              if type is path then res id need to be filled
  * @res_id  :  res id to be reserved
  * @in_port : input port resource info
+ * @out_port: output port resource info, used for RDI path only
  * @sync_mode : Sync mode
  *              Sync mode could be master, slave or none
  * @master_idx: master device index to be configured in the slave path
@@ -83,6 +84,7 @@
 	enum cam_isp_resource_type                res_type;
 	uint32_t                                  res_id;
 	struct cam_isp_in_port_info              *in_port;
+	struct cam_isp_out_port_info             *out_port;
 	enum cam_isp_hw_sync_mode                 sync_mode;
 	uint32_t                                  master_idx;
 	uint32_t                                  cid;
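The new out_port pointer pairs with the output_fmt field added to the CSID
path config earlier in this patch: when an RDI path is reserved, the hw
manager can now pass the output port so the CSID can program the RDI output
format. A hypothetical caller-side sketch follows; the IFE hw manager itself
is outside this excerpt, and the RDI_0/SYNC_NONE enum names and the
hw_ops.reserve hook are assumptions, not taken from these hunks:

	static int reserve_rdi_path_sketch(struct cam_hw_intf *hw_intf,
		uint32_t cid, struct cam_isp_in_port_info *in_port,
		struct cam_isp_out_port_info *out_port)
	{
		struct cam_csid_hw_reserve_resource_args csid_acquire;

		memset(&csid_acquire, 0, sizeof(csid_acquire));
		csid_acquire.res_type  = CAM_ISP_RESOURCE_PIX_PATH;
		csid_acquire.res_id    = CAM_IFE_PIX_PATH_RES_RDI_0;  /* assumed */
		csid_acquire.cid       = cid;
		csid_acquire.in_port   = in_port;
		csid_acquire.out_port  = out_port; /* new: CSID derives output_fmt */
		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;        /* assumed */

		return hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
			sizeof(csid_acquire));
	}
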
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index e25d973..e330c84 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/list.h>
@@ -24,9 +22,7 @@
 #include "cam_vfe_bus.h"
 #include "cam_vfe_top.h"
 #include "cam_ife_hw_mgr.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static const char drv_name[] = "vfe";
 
@@ -58,7 +54,7 @@
 	if (list_empty(&core_info->free_payload_list)) {
 		*evt_payload = NULL;
 		spin_unlock(&core_info->spin_lock);
-		pr_err_ratelimited("No free payload, core info 0x%x\n",
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core info 0x%x\n",
 			core_info->cpas_handle);
 		return -ENODEV;
 	}
@@ -78,11 +74,11 @@
 	unsigned long                       flags;
 
 	if (!core_info) {
-		pr_err("Invalid param core_info NULL");
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
 		return -EINVAL;
 	}
 	if (*evt_payload == NULL) {
-		pr_err("No payload to put\n");
+		CAM_ERR(CAM_ISP, "No payload to put");
 		return -EINVAL;
 	}
 
@@ -100,9 +96,9 @@
 	struct cam_vfe_hw_core_info       *core_info = NULL;
 	int rc = 0;
 
-	CDBG("Enter\n");
+	CAM_DBG(CAM_ISP, "Enter");
 	if (!hw_priv) {
-		pr_err("%s: Invalid arguments\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
@@ -113,7 +109,7 @@
 			core_info->vfe_top->top_priv,
 			get_hw_cap_args, arg_size);
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -125,11 +121,11 @@
 
 	handler_priv = th_payload->handler_priv;
 
-	CDBG("Enter\n");
-	CDBG("IRQ status_0 = 0x%x\n", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "Enter");
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
 
 	if (th_payload->evt_status_arr[0] & (1<<31)) {
-		CDBG("Calling Complete for RESET CMD\n");
+		CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
 		complete(handler_priv->reset_complete);
 
 		/*
@@ -143,7 +139,7 @@
 		rc = 0;
 	}
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -154,9 +150,9 @@
 	struct cam_vfe_hw_core_info       *core_info = NULL;
 	int rc = 0;
 
-	CDBG("Enter\n");
+	CAM_DBG(CAM_ISP, "Enter");
 	if (!hw_priv) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
@@ -164,7 +160,7 @@
 	vfe_hw->open_count++;
 	if (vfe_hw->open_count > 1) {
 		mutex_unlock(&vfe_hw->hw_mutex);
-		CDBG("VFE has already been initialized cnt %d\n",
+		CAM_DBG(CAM_ISP, "VFE has already been initialized cnt %d",
 			vfe_hw->open_count);
 		return 0;
 	}
@@ -176,24 +172,24 @@
 	/* Turn ON Regulators, Clocks and other SOC resources */
 	rc = cam_vfe_enable_soc_resources(soc_info);
 	if (rc) {
-		pr_err("Enable SOC failed\n");
+		CAM_ERR(CAM_ISP, "Enable SOC failed");
 		rc = -EFAULT;
 		goto decrement_open_cnt;
 	}
 
-	CDBG("Enable soc done\n");
+	CAM_DBG(CAM_ISP, "Enable soc done");
 
 	rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv,
 		NULL, 0);
 	if (rc) {
-		pr_err("Bus HW init Failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Bus HW init Failed rc=%d", rc);
 		goto disable_soc;
 	}
 
 	/* Do HW Reset */
 	rc = cam_vfe_reset(hw_priv, NULL, 0);
 	if (rc) {
-		pr_err("Reset Failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
 		goto deinit_bus;
 	}
 
@@ -217,22 +213,22 @@
 	struct cam_hw_soc_info            *soc_info = NULL;
 	int rc = 0;
 
-	CDBG("Enter\n");
+	CAM_DBG(CAM_ISP, "Enter");
 	if (!hw_priv) {
-		pr_err("%s: Invalid arguments\n", __func__);
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (!vfe_hw->open_count) {
 		mutex_unlock(&vfe_hw->hw_mutex);
-		pr_err("Error! Unbalanced deinit\n");
+		CAM_ERR(CAM_ISP, "Error! Unbalanced deinit");
 		return -EFAULT;
 	}
 	vfe_hw->open_count--;
 	if (vfe_hw->open_count) {
 		mutex_unlock(&vfe_hw->hw_mutex);
-		CDBG("open_cnt non-zero =%d\n", vfe_hw->open_count);
+		CAM_DBG(CAM_ISP, "open_cnt non-zero =%d", vfe_hw->open_count);
 		return 0;
 	}
 	mutex_unlock(&vfe_hw->hw_mutex);
@@ -240,14 +236,14 @@
 	soc_info = &vfe_hw->soc_info;
 
 	/* Turn OFF Regulators, Clocks and other SOC resources */
-	CDBG("Disable SOC resource\n");
+	CAM_DBG(CAM_ISP, "Disable SOC resource");
 	rc = cam_vfe_disable_soc_resources(soc_info);
 	if (rc)
-		pr_err("Disable SOC failed\n");
+		CAM_ERR(CAM_ISP, "Disable SOC failed");
 
 	vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -258,10 +254,10 @@
 	struct cam_vfe_hw_core_info       *core_info = NULL;
 	int rc;
 
-	CDBG("Enter\n");
+	CAM_DBG(CAM_ISP, "Enter");
 
 	if (!hw_priv) {
-		pr_err("Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -279,28 +275,28 @@
 		top_reset_irq_reg_mask, &core_info->irq_payload,
 		cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
 	if (core_info->irq_handle < 0) {
-		pr_err("subscribe irq controller failed\n");
+		CAM_ERR(CAM_ISP, "subscribe irq controller failed");
 		return -EFAULT;
 	}
 
 	reinit_completion(&vfe_hw->hw_complete);
 
-	CDBG("calling RESET\n");
+	CAM_DBG(CAM_ISP, "calling RESET");
 	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv, NULL, 0);
-	CDBG("waiting for vfe reset complete\n");
+	CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
 	/* Wait for Completion or Timeout of 500ms */
 	rc = wait_for_completion_timeout(&vfe_hw->hw_complete, 500);
 	if (!rc)
-		pr_err("Error! Reset Timeout\n");
+		CAM_ERR(CAM_ISP, "Error! Reset Timeout");
 
-	CDBG("reset complete done (%d)\n", rc);
+	CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
 
 	rc = cam_irq_controller_unsubscribe_irq(
 		core_info->vfe_irq_controller, core_info->irq_handle);
 	if (rc)
-		pr_err("Error! Unsubscribe failed\n");
+		CAM_ERR(CAM_ISP, "Error! Unsubscribe failed");
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -324,12 +320,13 @@
 
 	handler_priv = th_payload->handler_priv;
 
-	CDBG("IRQ status_0 = %x\n", th_payload->evt_status_arr[0]);
-	CDBG("IRQ status_1 = %x\n", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
 
 	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
 	if (rc) {
-		pr_err_ratelimited("No tasklet_cmd is free in queue\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue\n");
 		return rc;
 	}
 
@@ -346,14 +343,14 @@
 		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
 			irq_reg_offset[i]);
 	}
-	CDBG("Violation status = %x\n", evt_payload->irq_reg_val[2]);
+	CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
 
 	/*
 	 *  need to handle overflow condition here, otherwise irq storm
 	 *  will block everything.
 	 */
 	if (evt_payload->irq_reg_val[1]) {
-		pr_err("Mask all the interrupts\n");
+		CAM_ERR(CAM_ISP, "Mask all the interrupts");
 		cam_io_w(0, handler_priv->mem_base + 0x60);
 		cam_io_w(0, handler_priv->mem_base + 0x5C);
 
@@ -362,7 +359,7 @@
 
 	th_payload->evt_payload_priv = evt_payload;
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -376,7 +373,7 @@
 
 	if (!hw_priv || !reserve_args || (arg_size !=
 		sizeof(struct cam_vfe_acquire_args))) {
-		pr_err("Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
 		return -EINVAL;
 	}
 	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
@@ -393,7 +390,7 @@
 			core_info->vfe_bus->bus_priv, acquire,
 			sizeof(*acquire));
 	else
-		pr_err("Invalid res type:%d\n", acquire->rsrc_type);
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", acquire->rsrc_type);
 
 	mutex_unlock(&vfe_hw->hw_mutex);
 
@@ -410,7 +407,7 @@
 
 	if (!hw_priv || !release_args ||
 		(arg_size != sizeof(struct cam_isp_resource_node))) {
-		pr_err("Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -427,7 +424,7 @@
 			core_info->vfe_bus->bus_priv, isp_res,
 			sizeof(*isp_res));
 	else
-		pr_err("Invalid res type:%d\n", isp_res->res_type);
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
 
 	mutex_unlock(&vfe_hw->hw_mutex);
 
@@ -444,7 +441,7 @@
 
 	if (!hw_priv || !start_args ||
 		(arg_size != sizeof(struct cam_isp_resource_node))) {
-		pr_err("Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -473,11 +470,12 @@
 				core_info->vfe_top->top_priv, isp_res,
 				sizeof(struct cam_isp_resource_node));
 		else
-			pr_err("Error! subscribe irq controller failed\n");
+			CAM_ERR(CAM_ISP,
+				"Error! subscribe irq controller failed");
 	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
 		rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
 	} else {
-		pr_err("Invalid res type:%d\n", isp_res->res_type);
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
 	}
 
 	mutex_unlock(&vfe_hw->hw_mutex);
@@ -494,7 +492,7 @@
 
 	if (!hw_priv || !stop_args ||
 		(arg_size != sizeof(struct cam_isp_resource_node))) {
-		pr_err("Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -513,7 +511,7 @@
 			core_info->vfe_irq_controller, isp_res->irq_handle);
 		rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0);
 	} else {
-		pr_err("Invalid res type:%d\n", isp_res->res_type);
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
 	}
 
 	mutex_unlock(&vfe_hw->hw_mutex);
@@ -541,7 +539,7 @@
 	int rc = 0;
 
 	if (!hw_priv) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
@@ -564,7 +562,7 @@
 		break;
 
 	default:
-		pr_err("Invalid cmd type:%d\n", cmd_type);
+		CAM_ERR(CAM_ISP, "Invalid cmd type:%d", cmd_type);
 		rc = -EINVAL;
 		break;
 	}
@@ -594,13 +592,13 @@
 	int rc = -EINVAL;
 	int i;
 
-	CDBG("Enter");
+	CAM_DBG(CAM_ISP, "Enter");
 
 	rc = cam_irq_controller_init(drv_name,
 		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX),
 		vfe_hw_info->irq_reg_info, &core_info->vfe_irq_controller);
 	if (rc) {
-		pr_err("Error! cam_irq_controller_init failed\n");
+		CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
 		return rc;
 	}
 
@@ -608,7 +606,7 @@
 		soc_info, hw_intf, vfe_hw_info->top_hw_info,
 		&core_info->vfe_top);
 	if (rc) {
-		pr_err("Error! cam_vfe_top_init failed\n");
+		CAM_ERR(CAM_ISP, "Error! cam_vfe_top_init failed");
 		goto deinit_controller;
 	}
 
@@ -616,7 +614,7 @@
 		vfe_hw_info->bus_hw_info, core_info->vfe_irq_controller,
 		&core_info->vfe_bus);
 	if (rc) {
-		pr_err("Error! cam_vfe_bus_init failed\n");
+		CAM_ERR(CAM_ISP, "Error! cam_vfe_bus_init failed");
 		goto deinit_top;
 	}
 
@@ -657,16 +655,17 @@
 	rc = cam_vfe_bus_deinit(vfe_hw_info->bus_version,
 		&core_info->vfe_bus);
 	if (rc)
-		pr_err("Error cam_vfe_bus_deinit failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Error cam_vfe_bus_deinit failed rc=%d", rc);
 
 	rc = cam_vfe_top_deinit(vfe_hw_info->top_version,
 		&core_info->vfe_top);
 	if (rc)
-		pr_err("Error cam_vfe_top_deinit failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Error cam_vfe_top_deinit failed rc=%d", rc);
 
 	rc = cam_irq_controller_deinit(&core_info->vfe_irq_controller);
 	if (rc)
-		pr_err("Error cam_irq_controller_deinit failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP,
+			"Error cam_irq_controller_deinit failed rc=%d", rc);
 
 	spin_unlock_irqrestore(&core_info->spin_lock, flags);
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
index cdb8d6f..3e2307c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
@@ -10,7 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
 
 #include <linux/slab.h>
 #include <linux/mod_devicetable.h>
@@ -18,9 +17,7 @@
 #include "cam_vfe_dev.h"
 #include "cam_vfe_core.h"
 #include "cam_vfe_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static struct cam_hw_intf *cam_vfe_hw_list[CAM_VFE_HW_NUM_MAX] = {0, 0, 0, 0};
 
@@ -62,14 +59,15 @@
 	vfe_hw_intf->hw_ops.process_cmd = cam_vfe_process_cmd;
 	vfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_VFE;
 
-	CDBG("type %d index %d\n", vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
 
 	platform_set_drvdata(pdev, vfe_hw_intf);
 
 	vfe_hw->core_info = kzalloc(sizeof(struct cam_vfe_hw_core_info),
 		GFP_KERNEL);
 	if (!vfe_hw->core_info) {
-		CDBG("Failed to alloc for core\n");
+		CAM_DBG(CAM_ISP, "Failed to alloc for core");
 		rc = -ENOMEM;
 		goto free_vfe_hw;
 	}
@@ -78,7 +76,7 @@
 	match_dev = of_match_device(pdev->dev.driver->of_match_table,
 		&pdev->dev);
 	if (!match_dev) {
-		pr_err("Of_match Failed\n");
+		CAM_ERR(CAM_ISP, "Of_match Failed");
 		rc = -EINVAL;
 		goto free_core_info;
 	}
@@ -88,14 +86,14 @@
 	rc = cam_vfe_init_soc_resources(&vfe_hw->soc_info, cam_vfe_irq,
 		vfe_hw);
 	if (rc < 0) {
-		pr_err("Failed to init soc rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Failed to init soc rc=%d", rc);
 		goto free_core_info;
 	}
 
 	rc = cam_vfe_core_init(core_info, &vfe_hw->soc_info,
 		vfe_hw_intf, hw_info);
 	if (rc < 0) {
-		pr_err("Failed to init core rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Failed to init core rc=%d", rc);
 		goto deinit_soc;
 	}
 
@@ -110,13 +108,13 @@
 	cam_vfe_init_hw(vfe_hw, NULL, 0);
 	cam_vfe_deinit_hw(vfe_hw, NULL, 0);
 
-	CDBG("VFE%d probe successful\n", vfe_hw_intf->hw_idx);
+	CAM_DBG(CAM_ISP, "VFE%d probe successful", vfe_hw_intf->hw_idx);
 
 	return rc;
 
 deinit_soc:
 	if (cam_vfe_deinit_soc_resources(&vfe_hw->soc_info))
-		pr_err("Failed to deinit soc\n");
+		CAM_ERR(CAM_ISP, "Failed to deinit soc");
 free_core_info:
 	kfree(vfe_hw->core_info);
 free_vfe_hw:
@@ -136,44 +134,45 @@
 
 	vfe_hw_intf = platform_get_drvdata(pdev);
 	if (!vfe_hw_intf) {
-		pr_err("Error! No data in pdev\n");
+		CAM_ERR(CAM_ISP, "Error! No data in pdev");
 		return -EINVAL;
 	}
 
-	CDBG("type %d index %d\n", vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
 
 	if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX)
 		cam_vfe_hw_list[vfe_hw_intf->hw_idx] = NULL;
 
 	vfe_hw = vfe_hw_intf->hw_priv;
 	if (!vfe_hw) {
-		pr_err("Error! HW data is NULL\n");
+		CAM_ERR(CAM_ISP, "Error! HW data is NULL");
 		rc = -ENODEV;
 		goto free_vfe_hw_intf;
 	}
 
 	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
 	if (!core_info) {
-		pr_err("Error! core data NULL");
+		CAM_ERR(CAM_ISP, "Error! core data NULL");
 		rc = -EINVAL;
 		goto deinit_soc;
 	}
 
 	rc = cam_vfe_core_deinit(core_info, core_info->vfe_hw_info);
 	if (rc < 0)
-		pr_err("Failed to deinit core rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Failed to deinit core rc=%d", rc);
 
 	kfree(vfe_hw->core_info);
 
 deinit_soc:
 	rc = cam_vfe_deinit_soc_resources(&vfe_hw->soc_info);
 	if (rc < 0)
-		pr_err("Failed to deinit soc rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Failed to deinit soc rc=%d", rc);
 
 	mutex_destroy(&vfe_hw->hw_mutex);
 	kfree(vfe_hw);
 
-	CDBG("VFE%d remove successful\n", vfe_hw_intf->hw_idx);
+	CAM_DBG(CAM_ISP, "VFE%d remove successful", vfe_hw_intf->hw_idx);
 
 free_vfe_hw_intf:
 	kfree(vfe_hw_intf);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index fa9d86b..3b2ead2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -10,14 +10,10 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/slab.h>
 #include "cam_cpas_api.h"
 #include "cam_vfe_soc.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
 {
@@ -25,7 +21,7 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc) {
-		pr_err("Error! get DT properties failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Error! get DT properties failed rc=%d", rc);
 		return rc;
 	}
 
@@ -41,7 +37,8 @@
 	rc = cam_soc_util_request_platform_resource(soc_info, vfe_irq_handler,
 		irq_data);
 	if (rc)
-		pr_err("Error! Request platform resource failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP,
+			"Error! Request platform resource failed rc=%d", rc);
 
 	return rc;
 }
@@ -52,7 +49,8 @@
 
 	rc = cam_soc_util_release_platform_resource(soc_info);
 	if (rc)
-		pr_err("Error! Release platform resource failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP,
+			"Error! Release platform resource failed rc=%d", rc);
 
 	return rc;
 }
@@ -67,21 +65,22 @@
 	soc_private = kzalloc(sizeof(struct cam_vfe_soc_private),
 		GFP_KERNEL);
 	if (!soc_private) {
-		CDBG("Error! soc_private Alloc Failed\n");
+		CAM_DBG(CAM_ISP, "Error! soc_private Alloc Failed");
 		return -ENOMEM;
 	}
 	soc_info->soc_private = soc_private;
 
 	rc = cam_vfe_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("Error! Get DT properties failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Error! Get DT properties failed rc=%d", rc);
 		goto free_soc_private;
 	}
 
 	rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
 		irq_data);
 	if (rc < 0) {
-		pr_err("Error! Request platform resources failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP,
+			"Error! Request platform resources failed rc=%d", rc);
 		goto free_soc_private;
 	}
 
@@ -92,7 +91,7 @@
 	cpas_register_param.dev = &soc_info->pdev->dev;
 	rc = cam_cpas_register_client(&cpas_register_param);
 	if (rc) {
-		pr_err("CPAS registration failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
 		goto release_soc;
 	} else {
 		soc_private->cpas_handle = cpas_register_param.client_handle;
@@ -114,23 +113,24 @@
 	struct cam_vfe_soc_private       *soc_private;
 
 	if (!soc_info) {
-		pr_err("Error! soc_info NULL\n");
+		CAM_ERR(CAM_ISP, "Error! soc_info NULL");
 		return -ENODEV;
 	}
 
 	soc_private = soc_info->soc_private;
 	if (!soc_private) {
-		pr_err("Error! soc_private NULL\n");
+		CAM_ERR(CAM_ISP, "Error! soc_private NULL");
 		return -ENODEV;
 	}
 
 	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
 	if (rc)
-		pr_err("CPAS unregistration failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
 
 	rc = cam_vfe_release_platform_resource(soc_info);
 	if (rc < 0)
-		pr_err("Error! Release platform resources failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP,
+			"Error! Release platform resources failed rc=%d", rc);
 
 	kfree(soc_private);
 
@@ -145,7 +145,7 @@
 	struct cam_axi_vote               axi_vote;
 
 	if (!soc_info) {
-		pr_err("Error! Invalid params\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid params");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -159,7 +159,7 @@
 
 	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
 	if (rc) {
-		pr_err("Error! CPAS start failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Error! CPAS start failed rc=%d", rc);
 		rc = -EFAULT;
 		goto end;
 	}
@@ -167,7 +167,7 @@
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_TURBO_VOTE, true);
 	if (rc) {
-		pr_err("Error! enable platform failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Error! enable platform failed rc=%d", rc);
 		goto stop_cpas;
 	}
 
@@ -186,7 +186,7 @@
 	struct cam_vfe_soc_private       *soc_private;
 
 	if (!soc_info) {
-		pr_err("Error! Invalid params\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid params");
 		rc = -EINVAL;
 		return rc;
 	}
@@ -194,13 +194,13 @@
 
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc) {
-		pr_err("Disable platform failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Disable platform failed rc=%d", rc);
 		return rc;
 	}
 
 	rc = cam_cpas_stop(soc_private->cpas_handle);
 	if (rc) {
-		pr_err("Error! CPAS stop failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc);
 		return rc;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
index 0ac5f6d..0af32ad 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.c
@@ -16,9 +16,6 @@
 #include "cam_vfe_core.h"
 #include "cam_vfe_dev.h"
 
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
 static const struct of_device_id cam_vfe170_dt_match[] = {
 	{
 		.compatible = "qcom,vfe170",
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index b550071..275c7b5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -117,6 +117,28 @@
 	.reg_update_cmd           = 0x000004AC,
 };
 
+static struct cam_vfe_rdi_ver2_reg vfe170_rdi_reg = {
+	.reg_update_cmd           = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_170_rdi_0_data = {
+	.reg_update_cmd_data      = 0x2,
+	.sof_irq_mask             = 0x8000000,
+	.reg_update_irq_mask      = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_170_rdi_1_data = {
+	.reg_update_cmd_data      = 0x4,
+	.sof_irq_mask             = 0x10000000,
+	.reg_update_irq_mask      = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_170_rdi_2_data = {
+	.reg_update_cmd_data      = 0x8,
+	.sof_irq_mask             = 0x20000000,
+	.reg_update_irq_mask      = 0x80,
+};
+
 static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
 	.common_reg = &vfe170_top_common_reg,
 	.camif_hw_info = {
@@ -124,6 +146,16 @@
 		.camif_reg =  &vfe170_camif_reg,
 		.reg_data  =  &vfe_170_camif_reg_data,
 		},
+	.rdi_hw_info = {
+		.common_reg = &vfe170_top_common_reg,
+		.rdi_reg    = &vfe170_rdi_reg,
+		.reg_data = {
+			&vfe_170_rdi_0_data,
+			&vfe_170_rdi_1_data,
+			&vfe_170_rdi_2_data,
+			NULL,
+			},
+		},
 	.mux_type = {
 		CAM_VFE_CAMIF_VER_2_0,
 		CAM_VFE_RDI_VER_1_0,
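The three cam_vfe_rdi_reg_data entries added to cam_vfe170.h give each RDI its
own reg-update strobe value and the IRQ status bits that signal SOF and
reg-update done for that path. The mux code that consumes these tables
(cam_vfe_rdi*.c) is not part of this excerpt, so the private-data layout in
the sketch below is an assumption; only the struct, field and register names
come from this header:

	/* Assumed consumer-side layout, for illustration only. */
	struct example_rdi_priv {
		void __iomem                 *mem_base;
		struct cam_vfe_rdi_ver2_reg  *rdi_reg;   /* &vfe170_rdi_reg */
		struct cam_vfe_rdi_reg_data  *reg_data;  /* e.g. &vfe_170_rdi_0_data */
	};

	static void example_rdi_reg_update(struct example_rdi_priv *rdi)
	{
		/* strobe the per-RDI reg update bit (0x2/0x4/0x8 for RDI0..2) */
		cam_io_w_mb(rdi->reg_data->reg_update_cmd_data,
			rdi->mem_base + rdi->rdi_reg->reg_update_cmd);
	}

	static bool example_rdi_saw_sof(struct example_rdi_priv *rdi,
		uint32_t irq_status0)
	{
		return (irq_status0 & rdi->reg_data->sof_irq_mask) != 0;
	}
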
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
index 63ca5c2..c6c3272 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
@@ -10,11 +10,10 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include "cam_vfe_bus.h"
 #include "cam_vfe_bus_ver1.h"
 #include "cam_vfe_bus_ver2.h"
+#include "cam_debug_util.h"
 
 int cam_vfe_bus_init(uint32_t          bus_version,
 	struct cam_hw_soc_info        *soc_info,
@@ -31,7 +30,7 @@
 			vfe_irq_controller, vfe_bus);
 		break;
 	default:
-		pr_err("Unsupported Bus Version %x\n", bus_version);
+		CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
 		break;
 	}
 
@@ -48,7 +47,7 @@
 		rc = cam_vfe_bus_ver2_deinit(vfe_bus);
 		break;
 	default:
-		pr_err("Unsupported Bus Version %x\n", bus_version);
+		CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
 		break;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 489689c..f37ec38 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/ratelimit.h>
 #include <linux/slab.h>
 #include "cam_io_util.h"
@@ -24,9 +22,7 @@
 #include "cam_vfe_bus.h"
 #include "cam_vfe_bus_ver2.h"
 #include "cam_vfe_core.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 static const char drv_name[] = "vfe_bus";
 
@@ -37,6 +33,9 @@
 
 #define CAM_VFE_BUS_VER2_PAYLOAD_MAX             256
 
+#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH           0xFF01
+#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE          0xFF01
+
 #define MAX_BUF_UPDATE_REG_NUM   \
 	(sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client)/4)
 #define MAX_REG_VAL_PAIR_SIZE    \
@@ -174,7 +173,7 @@
 {
 	if (list_empty(&common_data->free_payload_list)) {
 		*evt_payload = NULL;
-		pr_err("No free payload\n");
+		CAM_ERR(CAM_ISP, "No free payload");
 		return -ENODEV;
 	}
 
@@ -192,11 +191,11 @@
 	uint32_t   status_reg0, status_reg1, status_reg2;
 
 	if (!core_info) {
-		pr_err("Invalid param core_info NULL");
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
 		return -EINVAL;
 	}
 	if (*evt_payload == NULL) {
-		pr_err("No payload to put\n");
+		CAM_ERR(CAM_ISP, "No payload to put");
 		return -EINVAL;
 	}
 
@@ -206,7 +205,7 @@
 	status_reg2 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
 
 	if (status_reg0 || status_reg1 || status_reg2) {
-		CDBG("status0 0x%x status1 0x%x status2 0x%x\n",
+		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x status2 0x%x",
 			status_reg0, status_reg1, status_reg2);
 		return 0;
 	}
@@ -229,7 +228,8 @@
 	*intra_client_mask = 0;
 
 	if (dual_slave_core == current_core) {
-		pr_err("Invalid params. Same core as Master and Slave\n");
+		CAM_ERR(CAM_ISP,
+			"Invalid params. Same core as Master and Slave");
 		return -EINVAL;
 	}
 
@@ -243,7 +243,7 @@
 			*intra_client_mask = 0x2;
 			break;
 		default:
-			pr_err("Invalid value for slave core %u\n",
+			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
 				dual_slave_core);
 			rc = -EINVAL;
 			break;
@@ -258,7 +258,7 @@
 			*intra_client_mask = 0x2;
 			break;
 		default:
-			pr_err("Invalid value for slave core %u\n",
+			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
 				dual_slave_core);
 			rc = -EINVAL;
 			break;
@@ -273,14 +273,15 @@
 			*intra_client_mask = 0x2;
 			break;
 		default:
-			pr_err("Invalid value for slave core %u\n",
+			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
 				dual_slave_core);
 			rc = -EINVAL;
 			break;
 		}
 		break;
 	default:
-		pr_err("Invalid value for master core %u\n", current_core);
+		CAM_ERR(CAM_ISP,
+			"Invalid value for master core %u", current_core);
 		rc = -EINVAL;
 		break;
 	}
@@ -348,6 +349,19 @@
 		case CAM_FORMAT_MIPI_RAW_14:
 		case CAM_FORMAT_MIPI_RAW_16:
 		case CAM_FORMAT_MIPI_RAW_20:
+		case CAM_FORMAT_DPCM_10_6_10:
+		case CAM_FORMAT_DPCM_10_8_10:
+		case CAM_FORMAT_DPCM_12_6_12:
+		case CAM_FORMAT_DPCM_12_8_12:
+		case CAM_FORMAT_DPCM_14_8_14:
+		case CAM_FORMAT_DPCM_14_10_14:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+		case CAM_FORMAT_PLAIN32_20:
 		case CAM_FORMAT_PLAIN128:
 			return 1;
 		default:
@@ -442,7 +456,8 @@
 		break;
 	}
 
-	pr_err("Unsupported format %u for resource_type %u", format, res_type);
+	CAM_ERR(CAM_ISP, "Unsupported format %u for resource_type %u",
+		format, res_type);
 
 	return -EINVAL;
 }
@@ -636,8 +651,6 @@
 	case CAM_FORMAT_NV21:
 	case CAM_FORMAT_NV12:
 		return PACKER_FMT_PLAIN_8_LSB_MSB_10;
-	case CAM_FORMAT_PLAIN16_16:
-		return PACKER_FMT_PLAIN_16_16BPP;
 	case CAM_FORMAT_PLAIN64:
 		return PACKER_FMT_PLAIN_64;
 	case CAM_FORMAT_MIPI_RAW_6:
@@ -652,6 +665,13 @@
 	case CAM_FORMAT_QTI_RAW_12:
 	case CAM_FORMAT_QTI_RAW_14:
 	case CAM_FORMAT_PLAIN128:
+	case CAM_FORMAT_PLAIN8:
+	case CAM_FORMAT_PLAIN16_8:
+	case CAM_FORMAT_PLAIN16_10:
+	case CAM_FORMAT_PLAIN16_12:
+	case CAM_FORMAT_PLAIN16_14:
+	case CAM_FORMAT_PLAIN16_16:
+	case CAM_FORMAT_PLAIN32_20:
 	case CAM_FORMAT_PD8:
 	case CAM_FORMAT_PD10:
 		return PACKER_FMT_PLAIN_128;
@@ -682,7 +702,7 @@
 	/* No need to allocate for BUS VER2. VFE OUT to WM is fixed. */
 	wm_idx = cam_vfe_bus_get_wm_idx(vfe_out_res_id, plane);
 	if (wm_idx < 0 || wm_idx >= CAM_VFE_BUS_VER2_MAX_CLIENTS) {
-		pr_err("Unsupported VFE out %d plane %d\n",
+		CAM_ERR(CAM_ISP, "Unsupported VFE out %d plane %d",
 			vfe_out_res_id, plane);
 		return -EINVAL;
 	}
@@ -701,8 +721,10 @@
 	rsrc_data->height = out_port_info->height;
 
 	if (rsrc_data->index < 3) {
-		rsrc_data->width = rsrc_data->width * 5/4 * rsrc_data->height;
-		rsrc_data->height = 1;
+		rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+		rsrc_data->height = 0;
+		rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
+		rsrc_data->pack_fmt = 0x0;
 		rsrc_data->en_cfg = 0x3;
 	} else if (rsrc_data->index < 5 ||
 		rsrc_data->index == 7 || rsrc_data->index == 8) {
@@ -735,7 +757,7 @@
 			}
 			break;
 		default:
-			pr_err("Invalid plane type %d\n", plane);
+			CAM_ERR(CAM_ISP, "Invalid plane type %d", plane);
 			return -EINVAL;
 		}
 		rsrc_data->en_cfg = 0x1;
@@ -807,7 +829,6 @@
 
 	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_addr);
 	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_cfg);
-	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->frame_inc);
 	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
 
 	cam_io_w_mb(rsrc_data->width,
@@ -817,19 +838,14 @@
 	cam_io_w(rsrc_data->pack_fmt,
 		common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
 
-	cam_io_w(0xFFFFFFFF, common_data->mem_base +
-		rsrc_data->hw_regs->irq_subsample_pattern);
-	cam_io_w(0x0, common_data->mem_base +
-		rsrc_data->hw_regs->irq_subsample_period);
-
-	cam_io_w(0xFFFFFFFF,
-		common_data->mem_base + rsrc_data->hw_regs->framedrop_pattern);
-	cam_io_w(0x0,
-		common_data->mem_base + rsrc_data->hw_regs->framedrop_period);
+	/* Configure stride for RDIs */
+	if (rsrc_data->index < 3)
+		cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
+			rsrc_data->hw_regs->stride));
 
 	/* Subscribe IRQ */
 	if (rsrc_data->irq_enabled) {
-		CDBG("Subscribe WM%d IRQ\n", rsrc_data->index);
+		CAM_DBG(CAM_ISP, "Subscribe WM%d IRQ", rsrc_data->index);
 		bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] =
 			(1 << rsrc_data->index);
 		wm_res->irq_handle = cam_irq_controller_subscribe_irq(
@@ -839,24 +855,31 @@
 			cam_ife_mgr_do_tasklet_buf_done,
 			wm_res->tasklet_info, cam_tasklet_enqueue_cmd);
 		if (wm_res->irq_handle < 0) {
-			pr_err("Subscribe IRQ failed for WM %d\n",
+			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for WM %d",
 				rsrc_data->index);
 			return -EFAULT;
 		}
 	}
 
+	/* enable ubwc if needed */
+	if (rsrc_data->en_ubwc) {
+		cam_io_w_mb(0x1, common_data->mem_base +
+			rsrc_data->hw_regs->ubwc_regs->mode_cfg);
+	}
+
 	/* Enable WM */
 	cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base +
 		rsrc_data->hw_regs->cfg);
 
-	CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
+	CAM_DBG(CAM_ISP, "WM res %d width = %d, height = %d", rsrc_data->index,
 		rsrc_data->width, rsrc_data->height);
-	CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
+	CAM_DBG(CAM_ISP, "WM res %d pk_fmt = %d", rsrc_data->index,
 		rsrc_data->pack_fmt & PACKER_FMT_MAX);
-	CDBG("WM res %d stride = %d, burst len = %d\n",
+	CAM_DBG(CAM_ISP, "WM res %d stride = %d, burst len = %d",
 		rsrc_data->index, rsrc_data->stride, 0xf);
-	CDBG("enable WM res %d offset 0x%x val 0x%x\n", rsrc_data->index,
-		(uint32_t) rsrc_data->hw_regs->cfg, rsrc_data->en_cfg);
+	CAM_DBG(CAM_ISP, "enable WM res %d offset 0x%x val 0x%x",
+		rsrc_data->index, (uint32_t) rsrc_data->hw_regs->cfg,
+		rsrc_data->en_cfg);
 
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
@@ -875,7 +898,7 @@
 	cam_io_w_mb(0x0,
 		common_data->mem_base + rsrc_data->hw_regs->cfg);
 
-	CDBG("irq_enabled %d", rsrc_data->irq_enabled);
+	CAM_DBG(CAM_ISP, "irq_enabled %d", rsrc_data->irq_enabled);
 	/* Unsubscribe IRQ */
 	if (rsrc_data->irq_enabled)
 		rc = cam_irq_controller_unsubscribe_irq(
@@ -902,18 +925,19 @@
 
 	wm_res = th_payload->handler_priv;
 	if (!wm_res) {
-		pr_err_ratelimited("Error! No resource\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource\n");
 		return -ENODEV;
 	}
 
 	rsrc_data = wm_res->res_priv;
 
-	CDBG("IRQ status_0 = %x\n", th_payload->evt_status_arr[0]);
-	CDBG("IRQ status_1 = %x\n", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
 
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
-		pr_err_ratelimited("No tasklet_cmd is free in queue\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue\n");
 		return rc;
 	}
 
@@ -928,7 +952,7 @@
 
 	th_payload->evt_payload_priv = evt_payload;
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -954,7 +978,7 @@
 			~BIT(rsrc_data->index);
 		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
 	}
-	CDBG("status_reg %x rc %d\n", status_reg, rc);
+	CAM_DBG(CAM_ISP, "status_reg %x rc %d", status_reg, rc);
 
 	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
 		cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
@@ -973,7 +997,7 @@
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_wm_resource_data),
 		GFP_KERNEL);
 	if (!rsrc_data) {
-		CDBG("Failed to alloc for WM res priv\n");
+		CAM_DBG(CAM_ISP, "Failed to alloc for WM res priv");
 		return -ENOMEM;
 	}
 	wm_res->res_priv = rsrc_data;
@@ -1011,7 +1035,7 @@
 	rsrc_data = wm_res->res_priv;
 	wm_res->res_priv = NULL;
 	if (!rsrc_data) {
-		pr_err("Error! WM res priv is NULL\n");
+		CAM_ERR(CAM_ISP, "Error! WM res priv is NULL");
 		return -ENOMEM;
 	}
 	kfree(rsrc_data);
@@ -1074,7 +1098,7 @@
 		/* First find a free group */
 		if (is_dual) {
 			if (list_empty(&ver2_bus_priv->free_dual_comp_grp)) {
-				pr_err("No Free Composite Group\n");
+				CAM_ERR(CAM_ISP, "No Free Composite Group");
 				return -ENODEV;
 			}
 			comp_grp_local = list_first_entry(
@@ -1087,7 +1111,7 @@
 				&rsrc_data->intra_client_mask);
 		} else {
 			if (list_empty(&ver2_bus_priv->free_comp_grp)) {
-				pr_err("No Free Composite Group\n");
+				CAM_ERR(CAM_ISP, "No Free Composite Group");
 				return -ENODEV;
 			}
 			comp_grp_local = list_first_entry(
@@ -1113,7 +1137,7 @@
 		/* Do not support runtime change in composite mask */
 		if (comp_grp_local->res_state ==
 			CAM_ISP_RESOURCE_STATE_STREAMING) {
-			pr_err("Invalid State %d Comp Grp %u\n",
+			CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u",
 				comp_grp_local->res_state,
 				rsrc_data->comp_grp_type);
 			return -EBUSY;
@@ -1135,7 +1159,7 @@
 	int match_found = 0;
 
 	if (!in_comp_grp) {
-		pr_err("Invalid Params Comp Grp %pK\n", in_rsrc_data);
+		CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", in_rsrc_data);
 		return -EINVAL;
 	}
 
@@ -1154,7 +1178,7 @@
 	}
 
 	if (!match_found) {
-		pr_err("Could not find matching Comp Grp type %u\n",
+		CAM_ERR(CAM_ISP, "Could not find matching Comp Grp type %u",
 			in_rsrc_data->comp_grp_type);
 		return -ENODEV;
 	}
@@ -1192,8 +1216,9 @@
 	cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
 		rsrc_data->hw_regs->comp_mask);
 
-	CDBG("composite_mask is 0x%x\n", rsrc_data->composite_mask);
-	CDBG("composite_mask addr 0x%x\n",  rsrc_data->hw_regs->comp_mask);
+	CAM_DBG(CAM_ISP, "composite_mask is 0x%x", rsrc_data->composite_mask);
+	CAM_DBG(CAM_ISP, "composite_mask addr 0x%x",
+		rsrc_data->hw_regs->comp_mask);
 
 	if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
 		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 &&
@@ -1218,7 +1243,7 @@
 	}
 
 	/* Subscribe IRQ */
-	CDBG("Subscribe COMP_GRP%d IRQ\n", rsrc_data->comp_grp_type);
+	CAM_DBG(CAM_ISP, "Subscribe COMP_GRP%d IRQ", rsrc_data->comp_grp_type);
 	comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
 		common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
 		bus_irq_reg_mask, comp_grp,
@@ -1226,7 +1251,7 @@
 		cam_ife_mgr_do_tasklet_buf_done,
 		comp_grp->tasklet_info, cam_tasklet_enqueue_cmd);
 	if (comp_grp->irq_handle < 0) {
-		pr_err("Subscribe IRQ failed for comp_grp %d\n",
+		CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
 			rsrc_data->comp_grp_type);
 		return -EFAULT;
 	}
@@ -1283,18 +1308,19 @@
 
 	comp_grp = th_payload->handler_priv;
 	if (!comp_grp) {
-		pr_err_ratelimited("Error! No resource\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource\n");
 		return -ENODEV;
 	}
 
 	rsrc_data = comp_grp->res_priv;
 
-	CDBG("IRQ status_0 = %x\n", th_payload->evt_status_arr[0]);
-	CDBG("IRQ status_1 = %x\n", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
 
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
-		pr_err_ratelimited("No tasklet_cmd is free in queue\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue\n");
 		return rc;
 	}
 
@@ -1309,7 +1335,7 @@
 
 	th_payload->evt_payload_priv = evt_payload;
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 }
 
@@ -1326,7 +1352,7 @@
 	uint32_t                               comp_err_reg;
 	uint32_t                               comp_grp_id;
 
-	CDBG("comp grp type %d\n", rsrc_data->comp_grp_type);
+	CAM_DBG(CAM_ISP, "comp grp type %d", rsrc_data->comp_grp_type);
 
 	if (!evt_payload)
 		return rc;
@@ -1369,7 +1395,7 @@
 			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 
-		CDBG("status reg = 0x%x, bit index = %d rc %d\n",
+		CAM_DBG(CAM_ISP, "status reg = 0x%x, bit index = %d rc %d",
 			status_reg, (comp_grp_id + 5), rc);
 		break;
 
@@ -1411,7 +1437,7 @@
 		break;
 	default:
 		rc = CAM_VFE_IRQ_STATUS_ERR;
-		pr_err("Error! Invalid comp_grp_type %u\n",
+		CAM_ERR(CAM_ISP, "Error! Invalid comp_grp_type %u",
 			rsrc_data->comp_grp_type);
 		break;
 	}
@@ -1433,7 +1459,7 @@
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_comp_grp_data),
 		GFP_KERNEL);
 	if (!rsrc_data) {
-		CDBG("Failed to alloc for comp_grp_priv\n");
+		CAM_DBG(CAM_ISP, "Failed to alloc for comp_grp_priv");
 		return -ENOMEM;
 	}
 	comp_grp->res_priv = rsrc_data;
@@ -1482,7 +1508,7 @@
 	comp_grp->res_priv = NULL;
 
 	if (!rsrc_data) {
-		pr_err("Error! comp_grp_priv is NULL\n");
+		CAM_ERR(CAM_ISP, "Error! comp_grp_priv is NULL");
 		return -ENODEV;
 	}
 	kfree(rsrc_data);
@@ -1507,14 +1533,14 @@
 	struct cam_vfe_bus_ver2_vfe_out_data   *rsrc_data = NULL;
 
 	if (!bus_priv || !acquire_args) {
-		pr_err("Invalid Param");
+		CAM_ERR(CAM_ISP, "Invalid Param");
 		return -EINVAL;
 	}
 
 	out_acquire_args = &acq_args->vfe_out;
 	format = out_acquire_args->out_port_info->format;
 
-	CDBG("Acquiring resource type 0x%x\n",
+	CAM_DBG(CAM_ISP, "Acquiring resource type 0x%x",
 		out_acquire_args->out_port_info->res_type);
 
 	vfe_out_res_id = cam_vfe_bus_get_out_res_id(
@@ -1528,7 +1554,7 @@
 
 	rsrc_node = &ver2_bus_priv->vfe_out[vfe_out_res_id];
 	if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
-		pr_err("Resource not available: Res_id %d state:%d\n",
+		CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d",
 			vfe_out_res_id, rsrc_node->res_state);
 		return -EBUSY;
 	}
@@ -1555,7 +1581,8 @@
 			out_acquire_args->dual_slave_core,
 			&rsrc_data->comp_grp);
 		if (rc) {
-			pr_err("VFE%d Comp_Grp acquire failed for Out %d rc=%d\n",
+			CAM_ERR(CAM_ISP,
+				"VFE%d Comp_Grp acquire fail for Out %d rc=%d",
 				rsrc_data->common_data->core_index,
 				vfe_out_res_id, rc);
 			return rc;
@@ -1579,7 +1606,8 @@
 			&rsrc_data->wm_res[i],
 			&client_done_mask);
 		if (rc) {
-			pr_err("VFE%d WM acquire failed for Out %d rc=%d\n",
+			CAM_ERR(CAM_ISP,
+				"VFE%d WM acquire failed for Out %d rc=%d",
 				rsrc_data->common_data->core_index,
 				vfe_out_res_id, rc);
 			goto release_wm;
@@ -1593,7 +1621,7 @@
 	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	out_acquire_args->rsrc_node = rsrc_node;
 
-	CDBG("Acquire successful\n");
+	CAM_DBG(CAM_ISP, "Acquire successful");
 	return rc;
 
 release_wm:
@@ -1614,7 +1642,7 @@
 	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
 
 	if (!bus_priv || !release_args) {
-		pr_err("Invalid input bus_priv %pK release_args %pK\n",
+		CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
 			bus_priv, release_args);
 		return -EINVAL;
 	}
@@ -1623,7 +1651,7 @@
 	rsrc_data = vfe_out->res_priv;
 
 	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		pr_err("Error! Invalid resource state:%d\n",
+		CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
 			vfe_out->res_state);
 	}
 
@@ -1653,17 +1681,17 @@
 	struct cam_vfe_bus_ver2_common_data   *common_data = NULL;
 
 	if (!vfe_out) {
-		pr_err("Invalid input\n");
+		CAM_ERR(CAM_ISP, "Invalid input");
 		return -EINVAL;
 	}
 
 	rsrc_data = vfe_out->res_priv;
 	common_data = rsrc_data->common_data;
 
-	CDBG("Start resource index %d\n", rsrc_data->out_type);
+	CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
 
 	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		pr_err("Error! Invalid resource state:%d\n",
+		CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
 			vfe_out->res_state);
 		return -EACCES;
 	}
@@ -1707,7 +1735,7 @@
 	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
 
 	if (!vfe_out) {
-		pr_err("Invalid input\n");
+		CAM_ERR(CAM_ISP, "Invalid input");
 		return -EINVAL;
 	}
 
@@ -1770,7 +1798,7 @@
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_vfe_out_data),
 		GFP_KERNEL);
 	if (!rsrc_data) {
-		CDBG("Error! Failed to alloc for vfe out priv\n");
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe out priv");
 		rc = -ENOMEM;
 		return rc;
 	}
@@ -1813,7 +1841,7 @@
 	vfe_out->res_priv = NULL;
 
 	if (!rsrc_data) {
-		pr_err("Error! vfe out priv is NULL\n");
+		CAM_ERR(CAM_ISP, "Error! vfe out priv is NULL");
 		return -ENOMEM;
 	}
 	kfree(rsrc_data);
@@ -1827,7 +1855,7 @@
 	struct cam_vfe_bus_ver2_priv          *bus_priv;
 
 	bus_priv     = th_payload->handler_priv;
-	CDBG("Enter\n");
+	CAM_DBG(CAM_ISP, "Enter");
 	return cam_irq_controller_handle_irq(evt_id,
 		bus_priv->common_data.bus_irq_controller);
 }
@@ -1856,12 +1884,13 @@
 		update_buf->cdm.res->res_priv;
 
 	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
-		pr_err("Failed! Invalid data\n");
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
 		return -EINVAL;
 	}
 
 	if (update_buf->num_buf != vfe_out_data->num_wm) {
-		pr_err("Failed! Invalid number buffers:%d required:%d\n",
+		CAM_ERR(CAM_ISP,
+			"Failed! Invalid number buffers:%d required:%d",
 			update_buf->num_buf, vfe_out_data->num_wm);
 		return -EINVAL;
 	}
@@ -1871,7 +1900,8 @@
 
 	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
 		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
-			pr_err("reg_val_pair %d exceeds the array limit %lu\n",
+			CAM_ERR(CAM_ISP,
+				"reg_val_pair %d exceeds the array limit %lu",
 				j, MAX_REG_VAL_PAIR_SIZE);
 			return -ENOMEM;
 		}
@@ -1879,14 +1909,14 @@
 		wm_data = vfe_out_data->wm_res[i]->res_priv;
 
 		/* For initial configuration program all bus registers */
-		if (wm_data->stride != io_cfg->planes[i].plane_stride ||
-			!wm_data->init_cfg_done) {
+		if ((wm_data->stride != io_cfg->planes[i].plane_stride ||
+			!wm_data->init_cfg_done) && (wm_data->index >= 3)) {
 			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 				wm_data->hw_regs->stride,
 				io_cfg->planes[i].plane_stride);
 			wm_data->stride = io_cfg->planes[i].plane_stride;
 		}
-		CDBG("image stride 0x%x\n", wm_data->stride);
+		CAM_DBG(CAM_ISP, "image stride 0x%x", wm_data->stride);
 
 		if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
 			!wm_data->init_cfg_done) {
@@ -1895,7 +1925,8 @@
 				io_cfg->framedrop_pattern);
 			wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
 		}
-		CDBG("framedrop pattern 0x%x\n", wm_data->framedrop_pattern);
+		CAM_DBG(CAM_ISP, "framedrop pattern 0x%x",
+			wm_data->framedrop_pattern);
 
 		if (wm_data->framedrop_period != io_cfg->framedrop_period ||
 			!wm_data->init_cfg_done) {
@@ -1904,7 +1935,8 @@
 				io_cfg->framedrop_period);
 			wm_data->framedrop_period = io_cfg->framedrop_period;
 		}
-		CDBG("framedrop period 0x%x\n", wm_data->framedrop_period);
+		CAM_DBG(CAM_ISP, "framedrop period 0x%x",
+			wm_data->framedrop_period);
 
 		if (wm_data->irq_subsample_period != io_cfg->subsample_period
 			|| !wm_data->init_cfg_done) {
@@ -1914,7 +1946,7 @@
 			wm_data->irq_subsample_period =
 				io_cfg->subsample_period;
 		}
-		CDBG("irq subsample period 0x%x\n",
+		CAM_DBG(CAM_ISP, "irq subsample period 0x%x",
 			wm_data->irq_subsample_period);
 
 		if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
@@ -1925,13 +1957,13 @@
 			wm_data->irq_subsample_pattern =
 				io_cfg->subsample_pattern;
 		}
-		CDBG("irq subsample pattern 0x%x\n",
+		CAM_DBG(CAM_ISP, "irq subsample pattern 0x%x",
 			wm_data->irq_subsample_pattern);
 
 		if (wm_data->en_ubwc) {
 			if (!wm_data->hw_regs->ubwc_regs) {
-				pr_err("%s: No UBWC register to configure.\n",
-					__func__);
+				CAM_ERR(CAM_ISP,
+					"No UBWC register to configure.");
 				return -EINVAL;
 			}
 			if (wm_data->packer_cfg !=
@@ -1943,7 +1975,8 @@
 				wm_data->packer_cfg =
 					io_cfg->planes[i].packer_config;
 			}
-			CDBG("packer cfg 0x%x\n", wm_data->packer_cfg);
+			CAM_DBG(CAM_ISP, "packer cfg 0x%x",
+				wm_data->packer_cfg);
 
 			if (wm_data->tile_cfg != io_cfg->planes[i].tile_config
 				|| !wm_data->init_cfg_done) {
@@ -1953,7 +1986,7 @@
 				wm_data->tile_cfg =
 					io_cfg->planes[i].tile_config;
 			}
-			CDBG("tile cfg 0x%x\n", wm_data->tile_cfg);
+			CAM_DBG(CAM_ISP, "tile cfg 0x%x", wm_data->tile_cfg);
 
 			if (wm_data->h_init != io_cfg->planes[i].h_init ||
 				!wm_data->init_cfg_done) {
@@ -1962,7 +1995,7 @@
 					io_cfg->planes[i].h_init);
 				wm_data->h_init = io_cfg->planes[i].h_init;
 			}
-			CDBG("h_init 0x%x\n", wm_data->h_init);
+			CAM_DBG(CAM_ISP, "h_init 0x%x", wm_data->h_init);
 
 			if (wm_data->v_init != io_cfg->planes[i].v_init ||
 				!wm_data->init_cfg_done) {
@@ -1971,7 +2004,7 @@
 					io_cfg->planes[i].v_init);
 				wm_data->v_init = io_cfg->planes[i].v_init;
 			}
-			CDBG("v_init 0x%x\n", wm_data->v_init);
+			CAM_DBG(CAM_ISP, "v_init 0x%x", wm_data->v_init);
 
 			if (wm_data->ubwc_meta_stride !=
 				io_cfg->planes[i].meta_stride ||
@@ -1983,7 +2016,8 @@
 				wm_data->ubwc_meta_stride =
 					io_cfg->planes[i].meta_stride;
 			}
-			CDBG("meta stride 0x%x\n", wm_data->ubwc_meta_stride);
+			CAM_DBG(CAM_ISP, "meta stride 0x%x",
+				wm_data->ubwc_meta_stride);
 
 			if (wm_data->ubwc_mode_cfg !=
 				io_cfg->planes[i].mode_config ||
@@ -1994,7 +2028,8 @@
 				wm_data->ubwc_mode_cfg =
 					io_cfg->planes[i].mode_config;
 			}
-			CDBG("ubwc mode cfg 0x%x\n", wm_data->ubwc_mode_cfg);
+			CAM_DBG(CAM_ISP, "ubwc mode cfg 0x%x",
+				wm_data->ubwc_mode_cfg);
 
 			if (wm_data->ubwc_meta_offset !=
 				io_cfg->planes[i].meta_offset ||
@@ -2006,14 +2041,14 @@
 				wm_data->ubwc_meta_offset =
 					io_cfg->planes[i].meta_offset;
 			}
-			CDBG("ubwc meta offset 0x%x\n",
+			CAM_DBG(CAM_ISP, "ubwc meta offset 0x%x",
 				wm_data->ubwc_meta_offset);
 
 			/* UBWC meta address */
 			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 				wm_data->hw_regs->ubwc_regs->meta_addr,
 				update_buf->image_buf[i]);
-			CDBG("ubwc meta addr 0x%llx\n",
+			CAM_DBG(CAM_ISP, "ubwc meta addr 0x%llx",
 				update_buf->image_buf[i]);
 		}
 
@@ -2028,7 +2063,7 @@
 				wm_data->hw_regs->image_addr,
 				update_buf->image_buf[i]);
 
-		CDBG("image address 0x%x\n", reg_val_pair[j-1]);
+		CAM_DBG(CAM_ISP, "image address 0x%x", reg_val_pair[j-1]);
 
 		frame_inc = io_cfg->planes[i].plane_stride *
 			io_cfg->planes[i].slice_height;
@@ -2049,7 +2084,8 @@
 
 	/* cdm util returns dwords, need to convert to bytes */
 	if ((size * 4) > update_buf->cdm.size) {
-		pr_err("Failed! Buf size:%d insufficient, expected size:%d\n",
+		CAM_ERR(CAM_ISP,
+			"Failed! Buf size:%d insufficient, expected size:%d",
 			update_buf->cdm.size, size);
 		return -ENOMEM;
 	}
@@ -2082,7 +2118,7 @@
 	uint32_t                         top_irq_reg_mask[2] = {0};
 
 	if (!bus_priv) {
-		pr_err("Error! Invalid args\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid args");
 		return -EINVAL;
 	}
 
@@ -2099,7 +2135,7 @@
 		NULL);
 
 	if (bus_priv->irq_handle <= 0) {
-		pr_err("Failed to subscribe BUS IRQ\n");
+		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
 		return -EFAULT;
 	}
 
@@ -2113,7 +2149,7 @@
 	int                              rc;
 
 	if (!bus_priv || (bus_priv->irq_handle <= 0)) {
-		pr_err("Error! Invalid args\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid args");
 		return -EINVAL;
 	}
 
@@ -2121,7 +2157,7 @@
 		bus_priv->common_data.vfe_irq_controller,
 		bus_priv->irq_handle);
 	if (rc)
-		pr_err("Failed to unsubscribe irq rc=%d\n", rc);
+		CAM_ERR(CAM_ISP, "Failed to unsubscribe irq rc=%d", rc);
 
 	return rc;
 }
@@ -2132,7 +2168,7 @@
 	int rc = -EINVAL;
 
 	if (!priv || !cmd_args) {
-		pr_err_ratelimited("Error! Invalid input arguments\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! Invalid input arguments\n");
 		return -EINVAL;
 	}
 
@@ -2141,7 +2177,7 @@
 		rc = cam_vfe_bus_update_buf(priv, cmd_args, arg_size);
 		break;
 	default:
-		pr_err_ratelimited("Error! Invalid camif process command:%d\n",
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Inval camif process command:%d\n",
 			cmd_type);
 		break;
 	}
@@ -2161,18 +2197,20 @@
 	struct cam_vfe_bus              *vfe_bus_local;
 	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info = bus_hw_info;
 
-	CDBG("Enter\n");
+	CAM_DBG(CAM_ISP, "Enter");
 
 	if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) {
-		pr_err("Error! Invalid params soc_info %pK hw_intf %pK hw_info %pK controller %pK\n",
-			soc_info, hw_intf, bus_hw_info, vfe_irq_controller);
+		CAM_ERR(CAM_ISP,
+			"Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK",
+			soc_info, hw_intf, bus_hw_info);
+		CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller);
 		rc = -EINVAL;
 		goto end;
 	}
 
 	vfe_bus_local = kzalloc(sizeof(struct cam_vfe_bus), GFP_KERNEL);
 	if (!vfe_bus_local) {
-		CDBG("Failed to alloc for vfe_bus\n");
+		CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus");
 		rc = -ENOMEM;
 		goto end;
 	}
@@ -2180,7 +2218,7 @@
 	bus_priv = kzalloc(sizeof(struct cam_vfe_bus_ver2_priv),
 		GFP_KERNEL);
 	if (!bus_priv) {
-		CDBG("Failed to alloc for vfe_bus_priv\n");
+		CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv");
 		rc = -ENOMEM;
 		goto free_bus_local;
 	}
@@ -2197,7 +2235,7 @@
 		&ver2_hw_info->common_reg.irq_reg_info,
 		&bus_priv->common_data.bus_irq_controller);
 	if (rc) {
-		pr_err("Error! cam_irq_controller_init failed\n");
+		CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
 		goto free_bus_priv;
 	}
 
@@ -2209,7 +2247,7 @@
 		rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
 			&bus_priv->bus_client[i]);
 		if (rc < 0) {
-			pr_err("Error! Init WM failed rc=%d\n", rc);
+			CAM_ERR(CAM_ISP, "Error! Init WM failed rc=%d", rc);
 			goto deinit_wm;
 		}
 	}
@@ -2218,7 +2256,7 @@
 		rc = cam_vfe_bus_init_comp_grp(i, bus_priv, bus_hw_info,
 			&bus_priv->comp_grp[i]);
 		if (rc < 0) {
-			pr_err("Error! Init Comp Grp failed rc=%d\n", rc);
+			CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc);
 			goto deinit_comp_grp;
 		}
 	}
@@ -2227,7 +2265,7 @@
 		rc = cam_vfe_bus_init_vfe_out_resource(i, bus_priv, bus_hw_info,
 			&bus_priv->vfe_out[i]);
 		if (rc < 0) {
-			pr_err("Error! Init VFE Out failed rc=%d\n", rc);
+			CAM_ERR(CAM_ISP, "Init VFE Out failed rc=%d", rc);
 			goto deinit_vfe_out;
 		}
 	}
@@ -2251,7 +2289,7 @@
 
 	*vfe_bus = vfe_bus_local;
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return rc;
 
 deinit_vfe_out:
@@ -2290,14 +2328,14 @@
 	struct cam_vfe_bus              *vfe_bus_local;
 
 	if (!vfe_bus || !*vfe_bus) {
-		pr_err("Error! Invalid input\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input");
 		return -EINVAL;
 	}
 	vfe_bus_local = *vfe_bus;
 
 	bus_priv = vfe_bus_local->bus_priv;
 	if (!bus_priv) {
-		pr_err("Error! bus_priv is NULL\n");
+		CAM_ERR(CAM_ISP, "Error! bus_priv is NULL");
 		rc = -ENODEV;
 		goto free_bus_local;
 	}
@@ -2309,19 +2347,22 @@
 	for (i = 0; i < CAM_VFE_BUS_VER2_MAX_CLIENTS; i++) {
 		rc = cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
 		if (rc < 0)
-			pr_err("Error! Deinit WM failed rc=%d\n", rc);
+			CAM_ERR(CAM_ISP,
+				"Error! Deinit WM failed rc=%d", rc);
 	}
 
 	for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
 		rc = cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
 		if (rc < 0)
-			pr_err("Error! Deinit Comp Grp failed rc=%d\n", rc);
+			CAM_ERR(CAM_ISP,
+				"Error! Deinit Comp Grp failed rc=%d", rc);
 	}
 
 	for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) {
 		rc = cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
 		if (rc < 0)
-			pr_err("Error! Deinit VFE Out failed rc=%d\n", rc);
+			CAM_ERR(CAM_ISP,
+				"Error! Deinit VFE Out failed rc=%d", rc);
 	}
 
 	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
@@ -2331,7 +2372,8 @@
 	rc = cam_irq_controller_deinit(
 		&bus_priv->common_data.bus_irq_controller);
 	if (rc)
-		pr_err("Error! Deinit IRQ Controller failed rc=%d\n", rc);
+		CAM_ERR(CAM_ISP,
+			"Error! Deinit IRQ Controller failed rc=%d", rc);
 
 	kfree(vfe_bus_local->bus_priv);
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index fa00769..e70ecc5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -10,9 +10,7 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
-#include <linux/slab.h>
+#include <linux/slab.h>
 #include <uapi/media/cam_isp.h>
 #include "cam_io_util.h"
 #include "cam_isp_hw_mgr_intf.h"
@@ -20,9 +18,7 @@
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
 #include "cam_vfe_camif_ver2.h"
-
-#undef  CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 struct cam_vfe_mux_camif_data {
 	void __iomem                                *mem_base;
@@ -55,7 +51,7 @@
 		rc = 0;
 		break;
 	default:
-		pr_err("Error! Invalid pix pattern:%d\n", pattern);
+		CAM_ERR(CAM_ISP, "Error! Invalid pix pattern:%d", pattern);
 		rc = -EINVAL;
 		break;
 	}
@@ -96,12 +92,12 @@
 	uint32_t                             val = 0;
 
 	if (!camif_res) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
 	if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		pr_err("Error! Invalid camif res res_state:%d\n",
+		CAM_ERR(CAM_ISP, "Error! Invalid camif res res_state:%d",
 			camif_res->res_state);
 		return -EINVAL;
 	}
@@ -129,7 +125,7 @@
 	/* Reg Update */
 	cam_io_w_mb(0x1, rsrc_data->mem_base + 0x4AC);
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 	return 0;
 }
 
@@ -142,7 +138,7 @@
 	int rc = 0;
 
 	if (!camif_res) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -188,26 +184,26 @@
 	payload = evt_payload_priv;
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 
-	CDBG("event ID:%d\n", payload->evt_id);
-	CDBG("irq_status_0 = %x\n", irq_status0);
+	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
 
 	switch (payload->evt_id) {
 	case CAM_ISP_HW_EVENT_SOF:
 		if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
-			CDBG("Received SOF\n");
+			CAM_DBG(CAM_ISP, "Received SOF");
 			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 		break;
 	case CAM_ISP_HW_EVENT_EPOCH:
 		if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
-			CDBG("Received EPOCH\n");
+			CAM_DBG(CAM_ISP, "Received EPOCH");
 			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 		cam_vfe_put_evt_payload(payload->core_info, &payload);
 		break;
 	case CAM_ISP_HW_EVENT_REG_UPDATE:
 		if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
-			CDBG("Received REG_UPDATE_ACK\n");
+			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
 			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
 		}
 		break;
@@ -215,7 +211,7 @@
 		break;
 	}
 
-	CDBG("returing status = %d\n", ret);
+	CAM_DBG(CAM_ISP, "returing status = %d", ret);
 	return ret;
 }
 
@@ -231,7 +227,7 @@
 	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_data),
 		GFP_KERNEL);
 	if (!camif_priv) {
-		CDBG("Error! Failed to alloc for camif_priv\n");
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for camif_priv");
 		return -ENOMEM;
 	}
 
@@ -264,7 +260,7 @@
 	camif_node->res_priv = NULL;
 
 	if (!camif_priv) {
-		pr_err("Error! camif_priv is NULL\n");
+		CAM_ERR(CAM_ISP, "Error! camif_priv is NULL");
 		return -ENODEV;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index f96f4d9..df7b0f9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -10,21 +10,19 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/slab.h>
 #include "cam_vfe_rdi.h"
 #include "cam_isp_hw_mgr_intf.h"
 #include "cam_vfe_hw_intf.h"
 #include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 struct cam_vfe_mux_rdi_data {
 	void __iomem                                *mem_base;
 	struct cam_hw_intf                          *hw_intf;
 	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_rdi_ver2_reg                 *rdi_reg;
+	struct cam_vfe_rdi_reg_data                 *reg_data;
 
 	enum cam_isp_hw_sync_mode          sync_mode;
 };
@@ -51,12 +49,12 @@
 	int                            rc = 0;
 
 	if (!rdi_res) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
 	if (rdi_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		pr_err("Error! Invalid rdi res res_state:%d\n",
+		CAM_ERR(CAM_ISP, "Error! Invalid rdi res res_state:%d",
 			rdi_res->res_state);
 		return -EINVAL;
 	}
@@ -67,7 +65,7 @@
 	/* Reg Update */
 	cam_io_w_mb(0x2, rsrc_data->mem_base + 0x4AC);
 
-	CDBG("Exit\n");
+	CAM_DBG(CAM_ISP, "Exit");
 
 	return rc;
 }
@@ -80,7 +78,7 @@
 	int rc = 0;
 
 	if (!rdi_res) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -103,13 +101,14 @@
 	int rc = -EINVAL;
 
 	if (!priv || !cmd_args) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
 	switch (cmd_type) {
 	default:
-		pr_err("Error! unsupported RDI process command:%d\n", cmd_type);
+		CAM_ERR(CAM_ISP,
+			"unsupported RDI process command:%d", cmd_type);
 		break;
 	}
 
@@ -139,23 +138,28 @@
 	payload = evt_payload_priv;
 	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
 
-	CDBG("event ID:%d\n", payload->evt_id);
-	CDBG("irq_status_0 = %x\n", irq_status0);
+	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
 
 	switch (payload->evt_id) {
 	case CAM_ISP_HW_EVENT_SOF:
-		if (irq_status0 & 0x8000000)
+		if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received SOF");
 			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
 		break;
 	case CAM_ISP_HW_EVENT_REG_UPDATE:
-		if (irq_status0 & 0x20)
+		if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received REG UPDATE");
 			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		cam_vfe_put_evt_payload(payload->core_info, &payload);
 		break;
 	default:
 		break;
 	}
 
-	CDBG("returing status = %d\n", ret);
+	CAM_DBG(CAM_ISP, "returing status = %d", ret);
 	return ret;
 }
 
@@ -166,11 +170,12 @@
 	struct cam_isp_resource_node  *rdi_node)
 {
 	struct cam_vfe_mux_rdi_data     *rdi_priv = NULL;
+	struct cam_vfe_rdi_ver2_hw_info *rdi_info = rdi_hw_info;
 
 	rdi_priv = kzalloc(sizeof(struct cam_vfe_mux_rdi_data),
 			GFP_KERNEL);
 	if (!rdi_priv) {
-		CDBG("Error! Failed to alloc for rdi_priv\n");
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for rdi_priv");
 		return -ENOMEM;
 	}
 
@@ -178,6 +183,31 @@
 
 	rdi_priv->mem_base   = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
 	rdi_priv->hw_intf    = hw_intf;
+	rdi_priv->common_reg = rdi_info->common_reg;
+	rdi_priv->rdi_reg    = rdi_info->rdi_reg;
+
+	switch (rdi_node->res_id) {
+	case CAM_ISP_HW_VFE_IN_RDI0:
+		rdi_priv->reg_data = rdi_info->reg_data[0];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI1:
+		rdi_priv->reg_data = rdi_info->reg_data[1];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI2:
+		rdi_priv->reg_data = rdi_info->reg_data[2];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		if (rdi_info->reg_data[3]) {
+			rdi_priv->reg_data = rdi_info->reg_data[3];
+		} else {
+			CAM_ERR(CAM_ISP, "Error! RDI3 is not supported");
+			goto err_init;
+		}
+		break;
+	default:
+		CAM_DBG(CAM_ISP, "invalid Resource id:%d", rdi_node->res_id);
+		goto err_init;
+	}
 
 	rdi_node->start = cam_vfe_rdi_resource_start;
 	rdi_node->stop  = cam_vfe_rdi_resource_stop;
@@ -185,6 +215,9 @@
 	rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half;
 
 	return 0;
+err_init:
+	kfree(rdi_priv);
+	return -EINVAL;
 }
 
 int cam_vfe_rdi_ver2_deinit(
@@ -200,7 +233,7 @@
 	rdi_node->res_priv = NULL;
 
 	if (!rdi_priv) {
-		pr_err("Error! rdi_priv NULL\n");
+		CAM_ERR(CAM_ISP, "Error! rdi_priv NULL");
 		return -ENODEV;
 	}
 	kfree(rdi_priv);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
index 9893474..04e4f02 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
@@ -16,18 +16,22 @@
 #include "cam_isp_hw.h"
 #include "cam_vfe_top.h"
 
+#define CAM_VFE_RDI_VER2_MAX  4
+
 struct cam_vfe_rdi_ver2_reg {
 	uint32_t     reg_update_cmd;
 };
 
 struct cam_vfe_rdi_reg_data {
+	uint32_t     reg_update_cmd_data;
+	uint32_t     sof_irq_mask;
 	uint32_t     reg_update_irq_mask;
 };
 
 struct cam_vfe_rdi_ver2_hw_info {
-	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
-	struct cam_vfe_rdi_ver2_reg               *rdi_reg;
-	struct cam_vfe_rdi_reg_data               *reg_data;
+	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
+	struct cam_vfe_rdi_ver2_reg                *rdi_reg;
+	struct cam_vfe_rdi_reg_data  *reg_data[CAM_VFE_RDI_VER2_MAX];
 };
 
 int cam_vfe_rdi_ver2_acquire_resource(
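
With reg_data widened to a per-RDI array, each per-target hw_info table is expected to supply one cam_vfe_rdi_reg_data entry per RDI path; the init code in cam_vfe_rdi.c above treats a missing RDI3 entry as "not supported". A hypothetical initializer to illustrate the layout only: the variable names are placeholders, and the values simply echo the constants this patch removes from the hard-coded paths in cam_vfe_rdi.c.

	#include "cam_vfe_rdi.h"

	/* Placeholder tables, not part of this patch. */
	static struct cam_vfe_rdi_reg_data example_rdi0_reg_data = {
		.reg_update_cmd_data = 0x2,       /* e.g. the value written for RDI reg update */
		.sof_irq_mask        = 0x8000000, /* previously hard-coded SOF check */
		.reg_update_irq_mask = 0x20,      /* previously hard-coded reg-update check */
	};

	static struct cam_vfe_rdi_ver2_hw_info example_rdi_hw_info = {
		/* common_reg and rdi_reg omitted here for brevity */
		.reg_data = {
			[0] = &example_rdi0_reg_data,
			/* [1] and [2] would point at their own reg_data tables */
			[3] = NULL,	/* optional: RDI3 init fails cleanly when absent */
		},
	};
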
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
index ee608fa..8eb1835 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -10,10 +10,9 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
+#include "cam_debug_util.h"
 
 int cam_vfe_top_init(uint32_t          top_version,
 	struct cam_hw_soc_info        *soc_info,
@@ -29,7 +28,7 @@
 			vfe_top);
 		break;
 	default:
-		pr_err("Error! Unsupported Version %x\n", top_version);
+		CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
 		break;
 	}
 
@@ -46,7 +45,7 @@
 		rc = cam_vfe_top_ver2_deinit(vfe_top);
 		break;
 	default:
-		pr_err("Error! Unsupported Version %x\n", top_version);
+		CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
 		break;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index ce7c63e..1a3eeae 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -10,17 +10,13 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/slab.h>
 #include "cam_io_util.h"
 #include "cam_cdm_util.h"
 #include "cam_vfe_hw_intf.h"
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 struct cam_vfe_top_ver2_common_data {
 	struct cam_hw_soc_info                     *soc_info;
@@ -43,13 +39,13 @@
 	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
 
 	if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
-		pr_err("Error! Invalid cmd size\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
 		return -EINVAL;
 	}
 
 	if (!cdm_args || !cdm_args->res || !top_priv ||
 		!top_priv->common_data.soc_info) {
-		pr_err("Error! Invalid args\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid args");
 		return -EINVAL;
 	}
 
@@ -57,22 +53,22 @@
 		(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
 
 	if (!cdm_util_ops) {
-		pr_err("Invalid CDM ops\n");
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
 		return -EINVAL;
 	}
 
 	size = cdm_util_ops->cdm_required_size_changebase();
 	/* since cdm returns dwords, we need to convert it into bytes */
 	if ((size * 4) > cdm_args->size) {
-		pr_err("buf size:%d is not sufficient, expected: %d\n",
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
 			cdm_args->size, size);
 		return -EINVAL;
 	}
 
 	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
 		top_priv->common_data.soc_info, VFE_CORE_BASE_IDX);
-	CDBG("core %d mem_base 0x%x\n", top_priv->common_data.soc_info->index,
-		mem_base);
+	CAM_DBG(CAM_ISP, "core %d mem_base 0x%x",
+		top_priv->common_data.soc_info->index, mem_base);
 
 	cdm_util_ops->cdm_write_changebase(cdm_args->cmd_buf_addr, mem_base);
 	cdm_args->used_bytes = (size * 4);
@@ -90,26 +86,26 @@
 	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
 
 	if (arg_size != sizeof(struct cam_isp_hw_get_cdm_args)) {
-		pr_err("Error! Invalid cmd size\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
 		return -EINVAL;
 	}
 
 	if (!cdm_args || !cdm_args->res) {
-		pr_err("Error! Invalid args\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid args");
 		return -EINVAL;
 	}
 
 	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
 
 	if (!cdm_util_ops) {
-		pr_err("Error! Invalid CDM ops\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid CDM ops");
 		return -EINVAL;
 	}
 
 	size = cdm_util_ops->cdm_required_size_reg_random(1);
 	/* since cdm returns dwords, we need to convert it into bytes */
 	if ((size * 4) > cdm_args->size) {
-		pr_err("Error! buf size:%d is not sufficient, expected: %d\n",
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
 			cdm_args->size, size);
 		return -EINVAL;
 	}
@@ -153,7 +149,7 @@
 	struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
 
 	if (!top_priv) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_ISP, "Invalid arguments");
 		return -EINVAL;
 	}
 
@@ -169,7 +165,7 @@
 		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
 		reg_common->global_reset_cmd);
 
-	CDBG("Reset HW exit\n");
+	CAM_DBG(CAM_ISP, "Reset HW exit");
 	return 0;
 }
 
@@ -183,7 +179,7 @@
 	int rc = -EINVAL;
 
 	if (!device_priv || !reserve_args) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -228,16 +224,16 @@
 	struct cam_isp_resource_node            *mux_res;
 
 	if (!device_priv || !release_args) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
 	top_priv = (struct cam_vfe_top_ver2_priv   *)device_priv;
 	mux_res = (struct cam_isp_resource_node *)release_args;
 
-	CDBG("%s: Resource in state %d\n", __func__, mux_res->res_state);
+	CAM_DBG(CAM_ISP, "Resource in state %d", mux_res->res_state);
 	if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
-		pr_err("Error! Resource in Invalid res_state :%d\n",
+		CAM_ERR(CAM_ISP, "Error! Resource in Invalid res_state :%d",
 			mux_res->res_state);
 		return -EINVAL;
 	}
@@ -254,7 +250,7 @@
 	int rc = 0;
 
 	if (!device_priv || !start_args) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -268,7 +264,7 @@
 		mux_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 		rc = 0;
 	} else {
-		pr_err("Invalid res id:%d\n", mux_res->res_id);
+		CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
 		rc = -EINVAL;
 	}
 
@@ -283,7 +279,7 @@
 	int rc = 0;
 
 	if (!device_priv || !stop_args) {
-		pr_err("Error! Invalid input arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
 		return -EINVAL;
 	}
 
@@ -295,7 +291,7 @@
 		mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3)) {
 		rc = mux_res->stop(mux_res);
 	} else {
-		pr_err("Invalid res id:%d\n", mux_res->res_id);
+		CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
 		rc = -EINVAL;
 	}
 
@@ -322,7 +318,7 @@
 	struct cam_vfe_top_ver2_priv            *top_priv;
 
 	if (!device_priv || !cmd_args) {
-		pr_err("Error! Invalid arguments\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid arguments");
 		return -EINVAL;
 	}
 	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
@@ -337,7 +333,7 @@
 		break;
 	default:
 		rc = -EINVAL;
-		pr_err("Error! Invalid cmd:%d\n", cmd_type);
+		CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
 		break;
 	}
 
@@ -357,7 +353,7 @@
 
 	vfe_top = kzalloc(sizeof(struct cam_vfe_top), GFP_KERNEL);
 	if (!vfe_top) {
-		CDBG("Error! Failed to alloc for vfe_top\n");
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top");
 		rc = -ENOMEM;
 		goto end;
 	}
@@ -365,7 +361,7 @@
 	top_priv = kzalloc(sizeof(struct cam_vfe_top_ver2_priv),
 		GFP_KERNEL);
 	if (!top_priv) {
-		CDBG("Error! Failed to alloc for vfe_top_priv\n");
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top_priv");
 		rc = -ENOMEM;
 		goto free_vfe_top;
 	}
@@ -391,7 +387,8 @@
 				CAM_ISP_HW_VFE_IN_RDI0 + j++;
 
 			rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
-				NULL, &top_priv->mux_rsrc[i]);
+				&ver2_hw_info->rdi_hw_info,
+				&top_priv->mux_rsrc[i]);
 			if (rc)
 				goto deinit_resources;
 		}
@@ -419,10 +416,10 @@
 	for (--i; i >= 0; i--) {
 		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
 			if (cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]))
-				pr_err("Camif Deinit failed\n");
+				CAM_ERR(CAM_ISP, "Camif Deinit failed");
 		} else {
 			if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
-				pr_err("RDI Deinit failed\n");
+				CAM_ERR(CAM_ISP, "RDI Deinit failed");
 		}
 		top_priv->mux_rsrc[i].res_state =
 			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
@@ -442,19 +439,19 @@
 	struct cam_vfe_top                     *vfe_top;
 
 	if (!vfe_top_ptr) {
-		pr_err("Error! Invalid input\n");
+		CAM_ERR(CAM_ISP, "Error! Invalid input");
 		return -EINVAL;
 	}
 
 	vfe_top = *vfe_top_ptr;
 	if (!vfe_top) {
-		pr_err("Error! vfe_top NULL\n");
+		CAM_ERR(CAM_ISP, "Error! vfe_top NULL");
 		return -ENODEV;
 	}
 
 	top_priv = vfe_top->top_priv;
 	if (!top_priv) {
-		pr_err("Error! vfe_top_priv NULL\n");
+		CAM_ERR(CAM_ISP, "Error! vfe_top_priv NULL");
 		rc = -ENODEV;
 		goto free_vfe_top;
 	}
@@ -466,12 +463,12 @@
 			CAM_ISP_HW_VFE_IN_CAMIF) {
 			rc = cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]);
 			if (rc)
-				pr_err("Error! Camif deinit failed rc=%d\n",
+				CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
 					rc);
 		} else {
 			rc = cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]);
 			if (rc)
-				pr_err("Error! RDI deinit failed rc=%d\n", rc);
+				CAM_ERR(CAM_ISP, "RDI deinit failed rc=%d", rc);
 		}
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
index 0813202..bafd7f2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -52,6 +52,7 @@
 struct cam_vfe_top_ver2_hw_info {
 	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
 	struct cam_vfe_camif_ver2_hw_info  camif_hw_info;
+	struct cam_vfe_rdi_ver2_hw_info    rdi_hw_info;
 	uint32_t mux_type[CAM_VFE_TOP_VER2_MUX_MAX];
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/Makefile
new file mode 100644
index 0000000..4d272d3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_dev.o cam_jpeg_context.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
new file mode 100644
index 0000000..a299179
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -0,0 +1,138 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+#include "cam_jpeg_context.h"
+#include "cam_context_utils.h"
+#include "cam_debug_util.h"
+
+static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unable to Acquire device %d", rc);
+	else
+		ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_jpeg_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unable to release device %d", rc);
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_jpeg_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	return cam_context_prepare_dev_to_hw(ctx, cmd);
+}
+
+static int __cam_jpeg_ctx_handle_buf_done_in_acquired(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_jpeg_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = { },
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_jpeg_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_jpeg_ctx_release_dev_in_acquired,
+			.config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
+		},
+		.crm_ops = { },
+		.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
+	},
+};
+
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf)
+{
+	int rc;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		CAM_ERR(CAM_JPEG, "Invalid Context");
+		rc = -EFAULT;
+		goto err;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++)
+		ctx->req_base[i].req_priv = ctx;
+
+	rc = cam_context_init(ctx_base, NULL, hw_intf, ctx->req_base,
+		CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Camera Context Base init failed");
+		goto err;
+	}
+
+	ctx_base->state_machine = cam_jpeg_ctx_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+err:
+	return rc;
+}
+
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx)
+{
+	if (!ctx || !ctx->base) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h
new file mode 100644
index 0000000..90ac5cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_CONTEXT_H_
+#define _CAM_JPEG_CONTEXT_H_
+
+#include <uapi/media/cam_jpeg.h>
+
+#include "cam_context.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+
+#define CAM_JPEG_HW_EVENT_MAX 20
+
+/**
+ * struct cam_jpeg_context - Jpeg context
+ * @base: Base jpeg cam context object
+ * @req_base: Common request structure
+ */
+struct cam_jpeg_context {
+	struct cam_context *base;
+	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+};
+
+/* cam jpeg context irq handling function type */
+typedef int (*cam_jpeg_hw_event_cb_func)(
+	struct cam_jpeg_context *ctx_jpeg,
+	void *evt_data);
+
+/**
+ * struct cam_jpeg_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops: Array of handle function pointers.
+ *
+ */
+struct cam_jpeg_ctx_irq_ops {
+	cam_jpeg_hw_event_cb_func irq_ops[CAM_JPEG_HW_EVENT_MAX];
+};
+
+/**
+ * cam_jpeg_context_init()
+ *
+ * @brief: Initialization function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be initialized
+ * @ctx_base: Context base from cam_context
+ * @hw_intf: JPEG hw manager interface
+ *
+ */
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf);
+
+/**
+ * cam_jpeg_context_deinit()
+ *
+ * @brief: Deinitialize function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be deinitialized
+ *
+ */
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx);
+
+#endif /* _CAM_JPEG_CONTEXT_H_ */
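
A minimal usage sketch for this interface, mirroring how the probe and remove paths in cam_jpeg_dev.c (added later in this patch) pair init and deinit; the wrapper function below is illustrative and not part of this patch:

	#include "cam_jpeg_context.h"
	#include "cam_debug_util.h"

	/* Illustrative helper only. */
	static int example_jpeg_ctx_setup(struct cam_jpeg_context *jpeg_ctx,
		struct cam_context *base_ctx,
		struct cam_hw_mgr_intf *hw_mgr_intf)
	{
		int rc;

		rc = cam_jpeg_context_init(jpeg_ctx, base_ctx, hw_mgr_intf);
		if (rc) {
			CAM_ERR(CAM_JPEG, "JPEG context init failed %d", rc);
			return rc;
		}

		/* ioctl/config traffic then flows through the state machine above */

		return cam_jpeg_context_deinit(jpeg_ctx);
	}
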
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
new file mode 100644
index 0000000..fb68ddb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
@@ -0,0 +1,136 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/kernel.h>
+
+#include "cam_node.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_dev.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_DEV_NAME "cam-jpeg"
+
+static struct cam_jpeg_dev g_jpeg_dev;
+
+static const struct of_device_id cam_jpeg_dt_match[] = {
+	{
+		.compatible = "qcom,cam-jpeg"
+	},
+	{ }
+};
+
+static int cam_jpeg_dev_remove(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "JPEG context %d deinit failed %d",
+				i, rc);
+	}
+
+	rc = cam_subdev_remove(&g_jpeg_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unregister failed %d", rc);
+
+	return rc;
+}
+
+static int cam_jpeg_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+
+	rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME,
+		CAM_JPEG_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG cam_subdev_probe failed %d", rc);
+		goto err;
+	}
+	node = (struct cam_node *)g_jpeg_dev.sd.token;
+
+	rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node,
+		(uint64_t *)&hw_mgr_intf);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Can not initialize JPEG HWmanager %d", rc);
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_jpeg_context_init(&g_jpeg_dev.ctx_jpeg[i],
+			&g_jpeg_dev.ctx[i],
+			&node->hw_mgr_intf);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "JPEG context init failed %d %d",
+				i, rc);
+			goto ctx_init_fail;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_jpeg_dev.ctx, CAM_CTX_MAX,
+		CAM_JPEG_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG node init failed %d", rc);
+		goto ctx_init_fail;
+	}
+
+	mutex_init(&g_jpeg_dev.jpeg_mutex);
+
+	CAM_INFO(CAM_JPEG, "Camera JPEG probe complete");
+
+	return rc;
+
+ctx_init_fail:
+	for (--i; i >= 0; i--)
+		if (cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]))
+			CAM_ERR(CAM_JPEG, "deinit fail %d %d", i, rc);
+unregister:
+	if (cam_subdev_remove(&g_jpeg_dev.sd))
+		CAM_ERR(CAM_JPEG, "remove fail %d", rc);
+err:
+	return rc;
+}
+
+static struct platform_driver jpeg_driver = {
+	.probe = cam_jpeg_dev_probe,
+	.remove = cam_jpeg_dev_remove,
+	.driver = {
+		.name = "cam_jpeg",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_dt_match,
+	},
+};
+
+static int __init cam_jpeg_dev_init_module(void)
+{
+	return platform_driver_register(&jpeg_driver);
+}
+
+static void __exit cam_jpeg_dev_exit_module(void)
+{
+	platform_driver_unregister(&jpeg_driver);
+}
+
+module_init(cam_jpeg_dev_init_module);
+module_exit(cam_jpeg_dev_exit_module);
+MODULE_DESCRIPTION("MSM JPEG driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
new file mode 100644
index 0000000..deab2d5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_DEV_H_
+#define _CAM_JPEG_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_jpeg_context.h"
+
+/**
+ * struct cam_jpeg_dev - Camera JPEG V4L2 device node
+ *
+ * @sd: Common camera subdevice node
+ * @node: Pointer to jpeg subdevice
+ * @ctx: JPEG base context storage
+ * @ctx_jpeg: JPEG private context storage
+ * @jpeg_mutex: Jpeg dev mutex
+ */
+struct cam_jpeg_dev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CAM_CTX_MAX];
+	struct cam_jpeg_context ctx_jpeg[CAM_CTX_MAX];
+	struct mutex jpeg_mutex;
+};
+#endif /* _CAM_JPEG_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile
new file mode 100644
index 0000000..08c9528
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
new file mode 100644
index 0000000..b06b5c4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -0,0 +1,1178 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr.h"
+#include "cam_enc_hw_intf.h"
+#include "cam_dma_hw_intf.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_HW_ENTRIES_MAX  20
+
+static struct cam_jpeg_hw_mgr g_jpeg_hw_mgr;
+
+static int32_t cam_jpeg_hw_mgr_cb(uint32_t irq_status,
+	int32_t result_size, void *data);
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data);
+
+static int cam_jpeg_mgr_process_irq(void *priv, void *data)
+{
+	int rc = 0;
+	struct cam_jpeg_process_irq_work_data_t *task_data;
+	struct cam_jpeg_hw_mgr *hw_mgr;
+	int32_t i;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_done_event_data buf_data;
+	struct cam_jpeg_set_irq_cb irq_cb;
+	uint32_t dev_type = 0;
+	uint64_t kaddr;
+	uint32_t *cmd_buf_kaddr;
+	size_t cmd_buf_len;
+	struct cam_jpeg_config_inout_param_info *p_params;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_frame_work_data_t *wq_task_data;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_JPEG, "Invalid data");
+		return -EINVAL;
+	}
+
+	task_data = data;
+	hw_mgr = &g_jpeg_hw_mgr;
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)task_data->data;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		return -EINVAL;
+	}
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+	irq_cb.data = NULL;
+	irq_cb.b_set_cb = false;
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+		CAM_ERR(CAM_JPEG, "process_cmd is null");
+		return -EINVAL;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "CMD_SET_IRQ_CB failed %d", rc);
+		return rc;
+	}
+
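+	/*
+	 * The IRQ marks completion of the current frame: free the core,
+	 * detach the finished config request and schedule the next one.
+	 */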
+	mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	hw_mgr->device_in_use[dev_type][0] = false;
+	p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+	hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
+	mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+	task = cam_req_mgr_workq_get_task(
+		g_jpeg_hw_mgr.work_process_frame);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		return -EINVAL;
+	}
+
+	wq_task_data = (struct cam_jpeg_process_frame_work_data_t *)
+		task->payload;
+	if (!wq_task_data) {
+		CAM_ERR(CAM_JPEG, "wq_task_data is NULL");
+		return -EINVAL;
+	}
+	wq_task_data->data = (void *)(uint64_t)dev_type;
+	wq_task_data->request_id = 0;
+	wq_task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "could not enqueue task %d", rc);
+		return rc;
+	}
+
+	rc = cam_mem_get_cpu_buf(
+		p_cfg_req->hw_cfg_args.hw_update_entries[1].handle,
+		(uint64_t *)&kaddr, &cmd_buf_len);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to get info for cmd buf: %x %d",
+			hw_mgr->iommu_hdl, rc);
+		return rc;
+	}
+
+	cmd_buf_kaddr = (uint32_t *)kaddr;
+
+	cmd_buf_kaddr =
+		(cmd_buf_kaddr +
+		(p_cfg_req->hw_cfg_args.hw_update_entries[1].offset/4));
+
+	p_params = (struct cam_jpeg_config_inout_param_info *)cmd_buf_kaddr;
+
+	p_params->output_size = task_data->result_size;
+	CAM_DBG(CAM_JPEG, "Encoded Size %d", task_data->result_size);
+
+	buf_data.num_handles = p_cfg_req->
+		hw_cfg_args.num_out_map_entries;
+	for (i = 0; i < buf_data.num_handles; i++) {
+		buf_data.resource_handle[i] =
+			p_cfg_req->hw_cfg_args.
+			out_map_entries[i].resource_handle;
+	}
+	buf_data.request_id =
+		(uint64_t)p_cfg_req->hw_cfg_args.priv;
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+
+	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+
+
+	return rc;
+}
+
+static int cam_jpeg_hw_mgr_cb(
+	uint32_t irq_status, int32_t result_size, void *data)
+{
+	int32_t rc;
+	unsigned long flags;
+	struct cam_jpeg_hw_mgr *hw_mgr = &g_jpeg_hw_mgr;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_irq_work_data_t *task_data;
+
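+	/* Defer the actual handling to the IRQ workqueue task */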
+	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(
+		g_jpeg_hw_mgr.work_process_irq_cb);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+		return -ENOMEM;
+	}
+
+	task_data = (struct cam_jpeg_process_irq_work_data_t *)task->payload;
+	task_data->data = data;
+	task_data->irq_status = irq_status;
+	task_data->result_size = result_size;
+	task_data->type = CAM_JPEG_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_irq;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_get_free_ctx(struct cam_jpeg_hw_mgr *hw_mgr)
+{
+	int i = 0;
+	int num_ctx = CAM_JPEG_CTX_MAX;
+
+	for (i = 0; i < num_ctx; i++) {
+		mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+		if (hw_mgr->ctx_data[i].in_use == false) {
+			hw_mgr->ctx_data[i].in_use = true;
+			mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+			break;
+		}
+		mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+	}
+
+	return i;
+}
+
+
+static int cam_jpeg_mgr_release_ctx(
+	struct cam_jpeg_hw_mgr *hw_mgr, int ctx_id)
+{
+	if (ctx_id >= CAM_JPEG_CTX_MAX) {
+		CAM_ERR(CAM_JPEG, "ctx_id is wrong: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	if (!hw_mgr->ctx_data[ctx_id].in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use: %d", ctx_id);
+		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+		return -EINVAL;
+	}
+
+	hw_mgr->ctx_data[ctx_id].in_use = 0;
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
+	return 0;
+}
+
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
+{
+	int rc;
+	int i = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = priv;
+	struct cam_hw_update_entry *cmd;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_hw_config_args *config_args = NULL;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint64_t request_id = 0;
+	struct cam_jpeg_process_frame_work_data_t *task_data =
+		(struct cam_jpeg_process_frame_work_data_t *)data;
+	uint32_t dev_type;
+	struct cam_jpeg_set_irq_cb irq_cb;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	uint32_t size = 0;
+	uint32_t mem_cam_base = 0;
+	struct cam_hw_done_event_data buf_data;
+
+	CAM_DBG(CAM_JPEG, "in cam_jpeg_mgr_process_cmd");
+	if (!hw_mgr || !task_data) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+			hw_mgr, task_data);
+		return -EINVAL;
+	}
+
+	if (list_empty(&hw_mgr->hw_config_req_list)) {
+		CAM_DBG(CAM_JPEG, "no available request");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	p_cfg_req = list_first_entry(&hw_mgr->hw_config_req_list,
+		struct cam_jpeg_hw_cfg_req, list);
+	if (!p_cfg_req) {
+		CAM_ERR(CAM_JPEG, "no request");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!hw_mgr->device_in_use[p_cfg_req->dev_type][0]) {
+		hw_mgr->device_in_use[p_cfg_req->dev_type][0] = true;
+		hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = p_cfg_req;
+		list_del_init(&p_cfg_req->list);
+	} else {
+		CAM_ERR(CAM_JPEG, "NOT dequeuing, just return");
+		rc = -EFAULT;
+		goto end;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
+	request_id = task_data->request_id;
+	if (request_id != (uint64_t)config_args->priv) {
+		CAM_WARN(CAM_JPEG, "not a recent req %llu %llu",
+			request_id, (uint64_t)config_args->priv);
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_JPEG, "No hw update entries are available");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	if (dev_type != p_cfg_req->dev_type)
+		CAM_WARN(CAM_JPEG, "dev type mismatch, something is wrong");
+
+	irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+	irq_cb.data = (void *)ctx_data;
+	irq_cb.b_set_cb = true;
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+		CAM_ERR(CAM_JPEG, "op process_cmd is null");
+		return -EINVAL;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "SET_IRQ_CB failed %d", rc);
+		return -EINVAL;
+	}
+
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.reset) {
+		CAM_ERR(CAM_JPEG, "op reset is null");
+		return -EINVAL;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.reset(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg hw reset failed %d", rc);
+		return -EINVAL;
+	}
+
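+	/*
+	 * Program a CDM change-base command so the following BLs are
+	 * applied relative to this device's register base.
+	 */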
+	mem_cam_base = (uint64_t)hw_mgr->cdm_reg_map[dev_type][0]->
+		mem_cam_base;
+	size = hw_mgr->cdm_info[dev_type][0].cdm_ops->
+		cdm_required_size_changebase();
+	hw_mgr->cdm_info[dev_type][0].cdm_ops->
+		cdm_write_changebase(ctx_data->cmd_chbase_buf_addr,
+		(uint64_t)hw_mgr->cdm_reg_map[dev_type][0]->mem_cam_base);
+	ctx_data->cdm_cmd_chbase->cmd_arrary_count = 1;
+	ctx_data->cdm_cmd_chbase->type = CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA;
+	ctx_data->cdm_cmd_chbase->flag = false;
+	ctx_data->cdm_cmd_chbase->userdata = NULL;
+	ctx_data->cdm_cmd_chbase->cookie = 0;
+	ctx_data->cdm_cmd_chbase->cmd[0].bl_addr.kernel_iova =
+		ctx_data->cmd_chbase_buf_addr;
+	ctx_data->cdm_cmd_chbase->cmd[0].offset = 0;
+	ctx_data->cdm_cmd_chbase->cmd[0].len = size;
+	rc = cam_cdm_submit_bls(hw_mgr->cdm_info[dev_type][0].cdm_handle,
+		ctx_data->cdm_cmd_chbase);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "failed cdm cmd %d", rc);
+
+	CAM_DBG(CAM_JPEG, "cfg e %pK num %d",
+		config_args->hw_update_entries,
+		config_args->num_hw_update_entries);
+
+	if (config_args->num_hw_update_entries > 0) {
+		cdm_cmd = ctx_data->cdm_cmd;
+		cdm_cmd->cmd_arrary_count =
+			config_args->num_hw_update_entries - 1;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		for (i = 0; i <= cdm_cmd->cmd_arrary_count; i++) {
+			cmd = (config_args->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle,
+			cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "Failed to apply the configs %d",
+				rc);
+			goto end_callcb;
+		}
+
+		if (!hw_mgr->devices[dev_type][0]->hw_ops.start) {
+			CAM_ERR(CAM_JPEG, "op start is null");
+			rc = -EINVAL;
+			goto end_callcb;
+		}
+		rc = hw_mgr->devices[dev_type][0]->hw_ops.start(
+			hw_mgr->devices[dev_type][0]->hw_priv,
+			NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "Failed to start jpeg hw %d",
+				rc);
+			goto end_callcb;
+		}
+	} else {
+		CAM_ERR(CAM_JPEG, "No commands to config");
+	}
+
+	return rc;
+
+end_callcb:
+	if (p_cfg_req) {
+		buf_data.num_handles = p_cfg_req->
+			hw_cfg_args.num_out_map_entries;
+		for (i = 0; i < buf_data.num_handles; i++) {
+			buf_data.resource_handle[i] =
+				p_cfg_req->hw_cfg_args.
+				out_map_entries[i].resource_handle;
+		}
+		buf_data.request_id =
+			(uint64_t)p_cfg_req->hw_cfg_args.priv;
+		ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+	}
+end:
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+	int rc;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *config_args = config_hw_args;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint64_t request_id = 0;
+	struct cam_hw_update_entry *hw_update_entries;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_frame_work_data_t *task_data;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+
+	if (!hw_mgr || !config_args) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_JPEG, "No hw update entries are available");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	if (list_empty(&hw_mgr->free_req_list)) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_JPEG, "list empty");
+		return -ENOMEM;
+	}
+
+	p_cfg_req = list_first_entry(&hw_mgr->free_req_list,
+		struct cam_jpeg_hw_cfg_req, list);
+	list_del_init(&p_cfg_req->list);
+
+	/* Update Currently Processing Config Request */
+	p_cfg_req->hw_cfg_args = *config_args;
+	p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	request_id = (uint64_t)config_args->priv;
+	hw_update_entries = config_args->hw_update_entries;
+	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %llu %pK",
+		ctx_data, request_id, config_args->priv);
+	task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		rc = -ENOMEM;
+		goto err_after_dq_free_list;
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	task_data = (struct cam_jpeg_process_frame_work_data_t *)
+		task->payload;
+	if (!task_data) {
+		CAM_ERR(CAM_JPEG, "task_data is NULL");
+		rc = -EINVAL;
+		goto err_after_dq_free_list;
+	}
+	CAM_DBG(CAM_JPEG, "cfge %pK num %d",
+		p_cfg_req->hw_cfg_args.hw_update_entries,
+		p_cfg_req->hw_cfg_args.num_hw_update_entries);
+
+	list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list);
+
+	task_data->data = (void *)(int64_t)p_cfg_req->dev_type;
+	task_data->request_id = request_id;
+	task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_cmd;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to enqueue task %d", rc);
+		goto err_after_get_task;
+	}
+
+	return rc;
+
+err_after_get_task:
+	list_del_init(&p_cfg_req->list);
+err_after_dq_free_list:
+	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+
+	return rc;
+}
+
+
+static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc, i, j, k;
+	struct cam_hw_prepare_update_args *prepare_args =
+		prepare_hw_update_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+
+	if (!prepare_args || !hw_mgr) {
+		CAM_ERR(CAM_JPEG, "Invalid args %pK %pK",
+			prepare_args, hw_mgr);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)prepare_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	packet = prepare_args->packet;
+	if (!packet) {
+		CAM_ERR(CAM_JPEG, "received packet is NULL");
+		return -EINVAL;
+	}
+
+	if (((packet->header.op_code & 0xff) != CAM_JPEG_OPCODE_ENC_UPDATE) &&
+		((packet->header.op_code
+		& 0xff) != CAM_JPEG_OPCODE_DMA_UPDATE)) {
+		CAM_ERR(CAM_JPEG, "Invalid Opcode in pkt: %d",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+	if ((packet->num_cmd_buf > 2) || !packet->num_patches ||
+		!packet->num_io_configs) {
+		CAM_ERR(CAM_JPEG,
+			"wrong number of cmd/patch/io cfg info: %u %u %u",
+			packet->num_cmd_buf, packet->num_patches,
+			packet->num_io_configs);
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&packet->payload +
+		(packet->cmd_buf_offset / 4));
+	CAM_DBG(CAM_JPEG, "packet = %pK cmd_desc = %pK size = %lu",
+		(void *)packet, (void *)cmd_desc,
+		sizeof(struct cam_cmd_buf_desc));
+
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc);
+		return rc;
+	}
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+	CAM_DBG(CAM_JPEG, "packet = %pK io_cfg_ptr = %pK size = %lu",
+		(void *)packet, (void *)io_cfg_ptr,
+		sizeof(struct cam_buf_io_cfg));
+
+	prepare_args->num_out_map_entries = 0;
+
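+	/* Split IO configs into in/out map entries, fences become sync ids */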
+	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+			prepare_args->in_map_entries[j].resource_handle =
+				io_cfg_ptr[i].resource_type;
+			prepare_args->in_map_entries[j++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_in_map_entries++;
+		} else {
+			prepare_args->out_map_entries[k].resource_handle =
+				io_cfg_ptr[i].resource_type;
+			prepare_args->out_map_entries[k++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_out_map_entries++;
+		}
+		CAM_DBG(CAM_JPEG, "dir[%d]: %u, fence: %u",
+			i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
+	}
+
+	for (i = 0; i < packet->num_cmd_buf;  i++) {
+		prepare_args->hw_update_entries[i].len =
+			(uint32_t)cmd_desc[i].length;
+		prepare_args->hw_update_entries[i].handle =
+			(uint32_t)cmd_desc[i].mem_handle;
+		prepare_args->hw_update_entries[i].offset =
+			(uint32_t)cmd_desc[i].offset;
+		prepare_args->num_hw_update_entries++;
+	}
+
+	prepare_args->priv = (void *)packet->header.request_id;
+
+	CAM_DBG(CAM_JPEG, "will wait on input sync sync_id %d",
+		prepare_args->in_map_entries[0].sync_id);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
+{
+	int rc;
+	int ctx_id = 0;
+	struct cam_hw_release_args *release_hw = release_hw_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint32_t dev_type;
+
+	if (!release_hw || !hw_mgr) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)release_hw->ctxt_to_hw_map;
+	ctx_id = ctx_data - hw_mgr->ctx_data;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	hw_mgr->cdm_info[dev_type][0].ref_cnt--;
+	if (!(hw_mgr->cdm_info[dev_type][0].ref_cnt)) {
+		if (cam_cdm_stream_off(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+			CAM_ERR(CAM_JPEG, "CDM stream off failed %d",
+				hw_mgr->cdm_info[dev_type][0].cdm_handle);
+		}
+		/* release cdm handle */
+		cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+	}
+
+	if (g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.deinit) {
+		rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.deinit(
+			g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv, NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Failed to Deinit %d HW", dev_type);
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	rc = cam_jpeg_mgr_release_ctx(hw_mgr, ctx_id);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "release ctx failed %d", ctx_id);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_JPEG, "ctx_data %pK", ctx_data);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	int rc;
+	int32_t ctx_id = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_acquire_args *args = acquire_hw_args;
+	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+	struct cam_cdm_acquire_data cdm_acquire;
+	uint32_t dev_type;
+	uint32_t size = 0;
+
+	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK", hw_mgr_priv,
+			acquire_hw_args);
+		return -EINVAL;
+	}
+
+	if (args->num_acq > 1) {
+		CAM_ERR(CAM_JPEG,
+			"number of resources are wrong: %u",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&jpeg_dev_acquire_info,
+			(void __user *)args->acquire_info,
+			sizeof(jpeg_dev_acquire_info))) {
+		CAM_ERR(CAM_JPEG, "copy failed");
+		return -EFAULT;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_jpeg_mgr_get_free_ctx(hw_mgr);
+	if (ctx_id >= CAM_JPEG_CTX_MAX) {
+		CAM_ERR(CAM_JPEG, "No free ctx space in hw_mgr");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EFAULT;
+	}
+
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+
+	ctx_data->cdm_cmd =
+		kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((CAM_JPEG_HW_ENTRIES_MAX - 1) *
+			sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!ctx_data->cdm_cmd) {
+		rc = -ENOMEM;
+		goto acq_cdm_hdl_failed;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->jpeg_dev_acquire_info = jpeg_dev_acquire_info;
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+	if (!hw_mgr->cdm_info[dev_type][0].ref_cnt) {
+
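+		/* First context on this device type: acquire a virtual CDM */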
+		if (dev_type == CAM_JPEG_RES_TYPE_ENC) {
+			memcpy(cdm_acquire.identifier,
+				"jpegenc", sizeof("jpegenc"));
+		} else {
+			memcpy(cdm_acquire.identifier,
+				"jpegdma", sizeof("jpegdma"));
+		}
+		cdm_acquire.cell_index = 0;
+		cdm_acquire.handle = 0;
+		cdm_acquire.userdata = ctx_data;
+		if (hw_mgr->cdm_reg_map[dev_type][0]) {
+			cdm_acquire.base_array[0] =
+				hw_mgr->cdm_reg_map[dev_type][0];
+		}
+		cdm_acquire.base_array_cnt = 1;
+		cdm_acquire.id = CAM_CDM_VIRTUAL;
+		cdm_acquire.cam_cdm_callback = NULL;
+
+		rc = cam_cdm_acquire(&cdm_acquire);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "Failed to acquire the CDM HW %d",
+				rc);
+			rc = -EFAULT;
+			goto acq_cdm_hdl_failed;
+		}
+		hw_mgr->cdm_info[dev_type][0].cdm_handle = cdm_acquire.handle;
+		hw_mgr->cdm_info[dev_type][0].cdm_ops = cdm_acquire.ops;
+		hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+	} else {
+		hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+	}
+
+	ctx_data->cdm_cmd_chbase =
+		kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			(2 * sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!ctx_data->cdm_cmd_chbase) {
+		rc = -ENOMEM;
+		goto start_cdm_hdl_failed;
+	}
+	size = hw_mgr->cdm_info[dev_type][0].
+		cdm_ops->cdm_required_size_changebase();
+	ctx_data->cmd_chbase_buf_addr = kzalloc(size*4, GFP_KERNEL);
+	if (!ctx_data->cmd_chbase_buf_addr) {
+		rc = -ENOMEM;
+		goto start_cdm_hdl_failed;
+	}
+
+	if (!g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.init) {
+		CAM_ERR(CAM_JPEG, "hw op init is null");
+		rc = -EINVAL;
+		goto start_cdm_hdl_failed;
+	}
+	rc = g_jpeg_hw_mgr.devices[dev_type][0]->hw_ops.init(
+		g_jpeg_hw_mgr.devices[dev_type][0]->hw_priv,
+		ctx_data,
+		sizeof(ctx_data));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to Init %d HW", dev_type);
+		goto start_cdm_hdl_failed;
+	}
+
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+		if (cam_cdm_stream_on(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+			CAM_ERR(CAM_JPEG, "Can not start cdm (%d)!",
+				hw_mgr->cdm_info[dev_type][0].cdm_handle);
+			rc = -EFAULT;
+			goto start_cdm_hdl_failed;
+		}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->context_priv = args->context_data;
+
+	args->ctxt_to_hw_map = (void *)&(hw_mgr->ctx_data[ctx_id]);
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+
+
+	if (copy_to_user((void __user *)args->acquire_info,
+		&jpeg_dev_acquire_info,
+		sizeof(jpeg_dev_acquire_info))) {
+		rc = -EFAULT;
+		goto copy_to_user_failed;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_JPEG, "success ctx_data= %pK", ctx_data);
+
+	return rc;
+
+copy_to_user_failed:
+	cam_cdm_stream_off(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+start_cdm_hdl_failed:
+	cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+acq_cdm_hdl_failed:
+	kfree(ctx_data->cdm_cmd);
+	cam_jpeg_mgr_release_ctx(hw_mgr, ctx_id);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+	int rc;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+	if (!hw_mgr_priv || !hw_caps_args) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK",
+			hw_mgr_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	if (copy_to_user((void __user *)query_cap->caps_handle,
+		&g_jpeg_hw_mgr.jpeg_caps,
+		sizeof(struct cam_jpeg_query_cap_cmd))) {
+		CAM_ERR(CAM_JPEG, "copy_to_user failed");
+		rc = -EFAULT;
+		goto copy_error;
+	}
+	CAM_DBG(CAM_JPEG, "cam_jpeg_mgr_get_hw_caps success");
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return 0;
+
+copy_error:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_jpeg_setup_workqs(void)
+{
+	int rc, i;
+
+	rc = cam_req_mgr_workq_create(
+		"jpeg_command_queue",
+		CAM_JPEG_WORKQ_NUM_TASK,
+		&g_jpeg_hw_mgr.work_process_frame,
+		CRM_WORKQ_USAGE_NON_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+		goto work_process_frame_failed;
+	}
+
+	rc = cam_req_mgr_workq_create(
+		"jpeg_message_queue",
+		CAM_JPEG_WORKQ_NUM_TASK,
+		&g_jpeg_hw_mgr.work_process_irq_cb,
+		CRM_WORKQ_USAGE_IRQ);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+		goto work_process_irq_cb_failed;
+	}
+
+	g_jpeg_hw_mgr.process_frame_work_data =
+		(struct cam_jpeg_process_frame_work_data_t *)
+		kzalloc(sizeof(struct cam_jpeg_process_frame_work_data_t) *
+			CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.process_frame_work_data) {
+		rc = -ENOMEM;
+		goto work_process_frame_data_failed;
+	}
+
+	g_jpeg_hw_mgr.process_irq_cb_work_data =
+		(struct cam_jpeg_process_irq_work_data_t *)
+		kzalloc(sizeof(struct cam_jpeg_process_irq_work_data_t) *
+			CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.process_irq_cb_work_data) {
+		rc = -ENOMEM;
+		goto work_process_irq_cb_data_failed;
+	}
+
+	for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+		g_jpeg_hw_mgr.work_process_irq_cb->task.pool[i].payload =
+			&g_jpeg_hw_mgr.process_irq_cb_work_data[i];
+
+	for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+		g_jpeg_hw_mgr.work_process_frame->task.pool[i].payload =
+			&g_jpeg_hw_mgr.process_frame_work_data[i];
+
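+	/* Pre-populate the free pool of hw config request nodes */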
+	INIT_LIST_HEAD(&g_jpeg_hw_mgr.hw_config_req_list);
+	INIT_LIST_HEAD(&g_jpeg_hw_mgr.free_req_list);
+	for (i = 0; i < CAM_JPEG_HW_CFG_Q_MAX; i++) {
+		INIT_LIST_HEAD(&(g_jpeg_hw_mgr.req_list[i].list));
+		list_add_tail(&(g_jpeg_hw_mgr.req_list[i].list),
+			&(g_jpeg_hw_mgr.free_req_list));
+	}
+
+	return rc;
+
+work_process_irq_cb_data_failed:
+	kfree(g_jpeg_hw_mgr.process_frame_work_data);
+work_process_frame_data_failed:
+	cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_irq_cb);
+work_process_irq_cb_failed:
+	cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_frame);
+work_process_frame_failed:
+
+	return rc;
+}
+
+static int cam_jpeg_init_devices(struct device_node *of_node,
+	uint32_t *p_num_enc_dev,
+	uint32_t *p_num_dma_dev)
+{
+	int count, i, rc;
+	uint32_t num_dev;
+	uint32_t num_dma_dev;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+	struct cam_hw_info *enc_hw = NULL;
+	struct cam_hw_info *dma_hw = NULL;
+	struct cam_hw_soc_info *enc_soc_info = NULL;
+	struct cam_hw_soc_info *dma_soc_info = NULL;
+
+	if (!p_num_enc_dev || !p_num_dma_dev) {
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		CAM_ERR(CAM_JPEG,
+			"no compat hw found in dev tree, count = %d",
+			count);
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-jpeg-enc", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "read num enc devices failed %d", rc);
+		goto num_enc_failed;
+	}
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]) {
+		rc = -ENOMEM;
+		CAM_ERR(CAM_JPEG, "allocating enc hw intf array failed");
+		goto num_enc_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-jpeg-dma", &num_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "get num dma dev nodes failed %d", rc);
+		goto num_dma_failed;
+	}
+
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_dma_dev, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]) {
+		rc = -ENOMEM;
+		goto num_dma_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_JPEG,
+				"error! Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_JPEG, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_JPEG, "no child device");
+			of_node_put(child_node);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+		CAM_DBG(CAM_JPEG, "child_intf %pK type %d id %d",
+			child_dev_intf,
+			child_dev_intf->hw_type,
+			child_dev_intf->hw_idx);
+
+		if ((child_dev_intf->hw_type == CAM_JPEG_DEV_ENC &&
+			child_dev_intf->hw_idx >= num_dev) ||
+			(child_dev_intf->hw_type == CAM_JPEG_DEV_DMA &&
+			child_dev_intf->hw_idx >= num_dma_dev)) {
+			CAM_ERR(CAM_JPEG, "index out of range");
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+		g_jpeg_hw_mgr.devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		of_node_put(child_node);
+	}
+
+	enc_hw = (struct cam_hw_info *)
+		g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0]->hw_priv;
+	enc_soc_info = &enc_hw->soc_info;
+	g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_ENC][0] =
+		&enc_soc_info->reg_map[0];
+	dma_hw = (struct cam_hw_info *)
+		g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]->hw_priv;
+	dma_soc_info = &dma_hw->soc_info;
+	g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_DMA][0] =
+		&dma_soc_info->reg_map[0];
+
+	*p_num_enc_dev = num_dev;
+	*p_num_dma_dev = num_dma_dev;
+
+	return rc;
+
+compat_hw_name_failed:
+	kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]);
+num_dma_failed:
+	kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]);
+num_enc_failed:
+num_dev_failed:
+
+	return rc;
+}
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
+{
+	int i, rc;
+	uint32_t num_dev;
+	uint32_t num_dma_dev;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_iommu_handle cdm_handles;
+
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_JPEG, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	memset(hw_mgr_hdl, 0x0, sizeof(struct cam_hw_mgr_intf));
+	hw_mgr_intf->hw_mgr_priv = &g_jpeg_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_jpeg_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_jpeg_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
+
+	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);
+
+	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
+		mutex_init(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);
+
+	rc = cam_jpeg_init_devices(of_node, &num_dev, &num_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg init devices failed %d", rc);
+		goto smmu_get_failed;
+	}
+
+	rc = cam_smmu_get_handle("jpeg", &g_jpeg_hw_mgr.iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg get iommu handle failed %d", rc);
+		goto smmu_get_failed;
+	}
+
+	CAM_DBG(CAM_JPEG, "mmu handle :%d", g_jpeg_hw_mgr.iommu_hdl);
+	rc = cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg attach failed: %d", rc);
+		goto jpeg_attach_failed;
+	}
+
+	rc = cam_cdm_get_iommu_handle("jpegenc", &cdm_handles);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to acquire cdm iommu handle %d", rc);
+		g_jpeg_hw_mgr.cdm_iommu_hdl = -1;
+		g_jpeg_hw_mgr.cdm_iommu_hdl_secure = -1;
+		goto cdm_iommu_failed;
+	}
+	g_jpeg_hw_mgr.cdm_iommu_hdl = cdm_handles.non_secure;
+	g_jpeg_hw_mgr.cdm_iommu_hdl_secure = cdm_handles.secure;
+
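+	/* Populate the capabilities reported by cam_jpeg_mgr_get_hw_caps() */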
+	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.non_secure =
+		g_jpeg_hw_mgr.iommu_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.secure =
+		g_jpeg_hw_mgr.iommu_sec_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.non_secure =
+		g_jpeg_hw_mgr.cdm_iommu_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.secure =
+		g_jpeg_hw_mgr.cdm_iommu_hdl_secure;
+	g_jpeg_hw_mgr.jpeg_caps.num_enc = num_dev;
+	g_jpeg_hw_mgr.jpeg_caps.num_dma = num_dma_dev;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.major = 4;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.minor = 2;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.incr  = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.reserved = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.major = 4;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.minor = 2;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.incr  = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.reserved = 0;
+
+	rc = cam_jpeg_setup_workqs();
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "setup work qs failed  %d", rc);
+		goto cdm_iommu_failed;
+	}
+
+	return rc;
+
+cdm_iommu_failed:
+	cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
+jpeg_attach_failed:
+	cam_smmu_destroy_handle(g_jpeg_hw_mgr.iommu_hdl);
+	g_jpeg_hw_mgr.iommu_hdl = 0;
+smmu_get_failed:
+	mutex_destroy(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
+		mutex_destroy(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
new file mode 100644
index 0000000..9e3418d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_H
+#define CAM_JPEG_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_jpeg_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+
+#define CAM_JPEG_WORKQ_NUM_TASK      30
+#define CAM_JPEG_WORKQ_TASK_CMD_TYPE 1
+#define CAM_JPEG_WORKQ_TASK_MSG_TYPE 2
+#define CAM_JPEG_HW_CFG_Q_MAX        50
+
+/**
+ * struct cam_jpeg_process_frame_work_data_t
+ *
+ * @type: Task type
+ * @data: Pointer to command data
+ * @request_id: Request id
+ */
+struct cam_jpeg_process_frame_work_data_t {
+	uint32_t type;
+	void *data;
+	uint64_t request_id;
+};
+
+/**
+ * struct cam_jpeg_process_irq_work_data_t
+ *
+ * @type: Task type
+ * @data: Pointer to message data
+ * @result_size: Result size of enc/dma
+ * @irq_status: IRQ status
+ */
+struct cam_jpeg_process_irq_work_data_t {
+	uint32_t type;
+	void *data;
+	int32_t result_size;
+	uint32_t irq_status;
+};
+
+/**
+ * struct cam_jpeg_hw_cdm_info_t
+ *
+ * @ref_cnt: Ref count of how many times device type is acquired
+ * @cdm_handle: Cdm handle
+ * @cdm_ops: Cdm ops struct
+ */
+struct cam_jpeg_hw_cdm_info_t {
+	int ref_cnt;
+	uint32_t cdm_handle;
+	struct cam_cdm_utils_ops *cdm_ops;
+};
+
+/**
+ * struct cam_jpeg_hw_cfg_req
+ *
+ * @list: List head
+ * @hw_cfg_args: Hw config args
+ * @dev_type: Dev type for cfg request
+ */
+struct cam_jpeg_hw_cfg_req {
+	struct list_head list;
+	struct cam_hw_config_args hw_cfg_args;
+	uint32_t dev_type;
+};
+
+/**
+ * struct cam_jpeg_hw_ctx_data
+ *
+ * @context_priv: Context private data, cam_context from
+ *     acquire.
+ * @ctx_mutex: Mutex for context
+ * @jpeg_dev_acquire_info: Acquire device info
+ * @ctxt_event_cb: Context callback function
+ * @in_use: Flag for context usage
+ * @wait_complete: Completion info
+ * @cdm_cmd: Cdm cmd submitted for that context.
+ * @cdm_cmd_chbase: Change base cdm command from context
+ * @cmd_chbase_buf_addr: Change base cmd buf address
+ */
+struct cam_jpeg_hw_ctx_data {
+	void *context_priv;
+	struct mutex ctx_mutex;
+	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+	cam_hw_event_cb_func ctxt_event_cb;
+	bool in_use;
+	struct completion wait_complete;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_cdm_bl_request *cdm_cmd_chbase;
+	uint32_t *cmd_chbase_buf_addr;
+};
+
+/**
+ * struct cam_jpeg_hw_mgr
+ * @hw_mgr_mutex: Mutex for JPEG hardware manager
+ * @hw_mgr_lock: Spinlock for JPEG hardware manager
+ * @ctx_data: Context data
+ * @jpeg_caps: JPEG capabilities
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @work_process_frame: Work queue for hw config requests
+ * @work_process_irq_cb: Work queue for processing IRQs.
+ * @process_frame_work_data: Work data pool for hw config
+ *     requests
+ * @process_irq_cb_work_data: Work data pool for irq requests
+ * @cdm_iommu_hdl: Iommu handle received from cdm
+ * @cdm_iommu_hdl_secure: Secure iommu handle received from cdm
+ * @devices: Core hw Devices of JPEG hardware manager
+ * @cdm_info: Cdm info for each core device.
+ * @cdm_reg_map: Regmap of each device for cdm.
+ * @device_in_use: Flag device being used for an active request
+ * @dev_hw_cfg_args: Current cfg request per core dev
+ * @hw_config_req_list: Pending hw update requests list
+ * @free_req_list: Free nodes for above list
+ * @req_list: Nodes of hw update list
+ */
+struct cam_jpeg_hw_mgr {
+	struct mutex hw_mgr_mutex;
+	spinlock_t hw_mgr_lock;
+	struct cam_jpeg_hw_ctx_data ctx_data[CAM_JPEG_CTX_MAX];
+	struct cam_jpeg_query_cap_cmd jpeg_caps;
+	int32_t iommu_hdl;
+	int32_t iommu_sec_hdl;
+	struct cam_req_mgr_core_workq *work_process_frame;
+	struct cam_req_mgr_core_workq *work_process_irq_cb;
+	struct cam_jpeg_process_frame_work_data_t *process_frame_work_data;
+	struct cam_jpeg_process_irq_work_data_t *process_irq_cb_work_data;
+	int cdm_iommu_hdl;
+	int cdm_iommu_hdl_secure;
+
+	struct cam_hw_intf **devices[CAM_JPEG_DEV_TYPE_MAX];
+	struct cam_jpeg_hw_cdm_info_t cdm_info[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	struct cam_soc_reg_map *cdm_reg_map[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	uint32_t device_in_use[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	struct cam_jpeg_hw_cfg_req *dev_hw_cfg_args[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+
+	struct list_head hw_config_req_list;
+	struct list_head free_req_list;
+	struct cam_jpeg_hw_cfg_req req_list[CAM_JPEG_HW_CFG_Q_MAX];
+};
+
+#endif /* CAM_JPEG_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h
new file mode 100644
index 0000000..71b21b9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_dma_hw_intf.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_DMA_HW_INTF_H
+#define CAM_JPEG_DMA_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_intf.h"
+
+enum cam_jpeg_dma_cmd_type {
+	CAM_JPEG_DMA_CMD_CDM_CFG,
+	CAM_JPEG_DMA_CMD_SET_IRQ_CB,
+	CAM_JPEG_DMA_CMD_MAX,
+};
+
+#endif /* CAM_JPEG_DMA_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h
new file mode 100644
index 0000000..f0b4e00
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_enc_hw_intf.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_ENC_HW_INTF_H
+#define CAM_JPEG_ENC_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_intf.h"
+
+enum cam_jpeg_enc_cmd_type {
+	CAM_JPEG_ENC_CMD_CDM_CFG,
+	CAM_JPEG_ENC_CMD_SET_IRQ_CB,
+	CAM_JPEG_ENC_CMD_MAX,
+};
+
+#endif /* CAM_JPEG_ENC_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
new file mode 100644
index 0000000..3204388
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_INTF_H
+#define CAM_JPEG_HW_INTF_H
+
+#define CAM_JPEG_CTX_MAX              8
+#define CAM_JPEG_DEV_PER_TYPE_MAX     1
+
+#define CAM_JPEG_CMD_BUF_MAX_SIZE     128
+#define CAM_JPEG_MSG_BUF_MAX_SIZE     CAM_JPEG_CMD_BUF_MAX_SIZE
+
+enum cam_jpeg_hw_type {
+	CAM_JPEG_DEV_ENC,
+	CAM_JPEG_DEV_DMA,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
new file mode 100644
index 0000000..d5c8c9d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_INTF_H
+#define CAM_JPEG_HW_MGR_INTF_H
+
+#include <uapi/media/cam_jpeg.h>
+#include <uapi/media/cam_defs.h>
+#include <linux/of.h>
+
+#include "cam_cpas_api.h"
+
+#define JPEG_TURBO_VOTE           640000000
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node,
+	uint64_t *hw_mgr_hdl);
+
+/**
+ * struct cam_jpeg_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag for ahb vote data
+ * @axi_vote_valid: Flag for axi vote data
+ */
+struct cam_jpeg_cpas_vote {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
+
+struct cam_jpeg_set_irq_cb {
+	int32_t (*jpeg_hw_mgr_cb)(
+		uint32_t irq_status,
+		int32_t result_size,
+		void *data);
+	void *data;
+	uint32_t b_set_cb;
+};
+
+#endif /* CAM_JPEG_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
new file mode 100644
index 0000000..23b27bf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_dev.o jpeg_dma_core.o jpeg_dma_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
new file mode 100644
index 0000000..05c1a95
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
@@ -0,0 +1,165 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_dma_hw_intf.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	struct cam_jpeg_cpas_vote cpas_vote;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_dma_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+		core_info;
+
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
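+	/* Vote AHB/AXI bandwidth through CPAS before enabling clocks */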
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.compressed_bw = JPEG_TURBO_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = JPEG_TURBO_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas start failed: %d", rc);
+
+	rc = cam_jpeg_dma_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "soc enable failed %d", rc);
+		cam_cpas_stop(core_info->cpas_handle);
+	}
+
+	return rc;
+}
+
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_dma_dev->soc_info;
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_jpeg_dma_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
+
+	return 0;
+}
+
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_JPEG_DMA_CMD_MAX) {
+		CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	core_info =
+		(struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+		core_info;
+
+	switch (cmd_type) {
+	case CAM_JPEG_DMA_CMD_SET_IRQ_CB:
+	{
+		struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_JPEG, "cmd args NULL");
+			return -EINVAL;
+		}
+		if (irq_cb->b_set_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb =
+				irq_cb->jpeg_hw_mgr_cb;
+			core_info->irq_cb.data = irq_cb->data;
+		} else {
+			core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+			core_info->irq_cb.data = NULL;
+		}
+		rc = 0;
+		break;
+	}
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data)
+{
+	return IRQ_HANDLED;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
new file mode 100644
index 0000000..bb4e34a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_DMA_CORE_H
+#define CAM_JPEG_DMA_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+struct cam_jpeg_dma_device_hw_info {
+	uint32_t reserved;
+};
+
+struct cam_jpeg_dma_set_irq_cb {
+	int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status,
+		int32_t result_size, void *data);
+	void *data;
+};
+
+enum cam_jpeg_dma_core_state {
+	CAM_JPEG_DMA_CORE_NOT_READY,
+	CAM_JPEG_DMA_CORE_READY,
+	CAM_JPEG_DMA_CORE_RESETTING,
+	CAM_JPEG_DMA_CORE_STATE_MAX,
+};
+
+struct cam_jpeg_dma_device_core_info {
+	enum cam_jpeg_dma_core_state core_state;
+	struct cam_jpeg_dma_device_hw_info *jpeg_dma_hw_info;
+	uint32_t cpas_handle;
+	struct cam_jpeg_dma_set_irq_cb irq_cb;
+};
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data);
+
+#endif /* CAM_JPEG_DMA_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
new file mode 100644
index 0000000..829bb51
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
@@ -0,0 +1,233 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_hw_info = {
+	.reserved = 0,
+};
+EXPORT_SYMBOL(cam_jpeg_dma_hw_info);
+
+static int cam_jpeg_dma_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_jpeg_dma_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "jpeg-dma",
+		sizeof("jpeg-dma"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+static int cam_jpeg_dma_unregister_cpas(
+	struct cam_jpeg_dma_device_core_info *core_info)
+{
+	int rc;
+
+	rc = cam_cpas_unregister_client(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+	core_info->cpas_handle = 0;
+
+	return rc;
+}
+
+static int cam_jpeg_dma_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_dma_dev = NULL;
+	struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	jpeg_dma_dev_intf = platform_get_drvdata(pdev);
+	if (!jpeg_dma_dev_intf) {
+		CAM_ERR(CAM_JPEG, "error No data in pdev");
+		return -EINVAL;
+	}
+
+	jpeg_dma_dev = jpeg_dma_dev_intf->hw_priv;
+	if (!jpeg_dma_dev) {
+		CAM_ERR(CAM_JPEG, "error HW data is NULL");
+		rc = -ENODEV;
+		goto free_jpeg_hw_intf;
+	}
+
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_JPEG, "error core data NULL");
+		rc = -EINVAL;
+		goto deinit_soc;
+	}
+
+	rc = cam_jpeg_dma_unregister_cpas(core_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to unregister cpas %d", rc);
+
+	kfree(core_info);
+
+deinit_soc:
+	rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&jpeg_dma_dev->hw_mutex);
+	kfree(jpeg_dma_dev);
+
+free_jpeg_hw_intf:
+	kfree(jpeg_dma_dev_intf);
+	return rc;
+}
+
+static int cam_jpeg_dma_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_dma_dev = NULL;
+	struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	struct cam_jpeg_dma_device_hw_info *hw_info = NULL;
+	int rc;
+
+	jpeg_dma_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!jpeg_dma_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &jpeg_dma_dev_intf->hw_idx);
+
+	jpeg_dma_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!jpeg_dma_dev) {
+		rc = -ENOMEM;
+		goto error_alloc_dev;
+	}
+	jpeg_dma_dev->soc_info.pdev = pdev;
+	jpeg_dma_dev_intf->hw_priv = jpeg_dma_dev;
+	jpeg_dma_dev_intf->hw_ops.init = cam_jpeg_dma_init_hw;
+	jpeg_dma_dev_intf->hw_ops.deinit = cam_jpeg_dma_deinit_hw;
+	jpeg_dma_dev_intf->hw_ops.process_cmd = cam_jpeg_dma_process_cmd;
+	jpeg_dma_dev_intf->hw_type = CAM_JPEG_DEV_DMA;
+
+	platform_set_drvdata(pdev, jpeg_dma_dev_intf);
+	jpeg_dma_dev->core_info =
+		kzalloc(sizeof(struct cam_jpeg_dma_device_core_info),
+			GFP_KERNEL);
+	if (!jpeg_dma_dev->core_info) {
+		rc = -ENOMEM;
+		goto error_alloc_core;
+	}
+	core_info = (struct cam_jpeg_dma_device_core_info *)jpeg_dma_dev->
+		core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_JPEG, "No jpeg_dma hardware info");
+		rc = -EINVAL;
+		goto error_match_dev;
+	}
+	hw_info = (struct cam_jpeg_dma_device_hw_info *)match_dev->data;
+	core_info->jpeg_dma_hw_info = hw_info;
+	core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY;
+
+	rc = cam_jpeg_dma_init_soc_resources(&jpeg_dma_dev->soc_info,
+		cam_jpeg_dma_irq,
+		jpeg_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to init_soc %d", rc);
+		goto error_match_dev;
+	}
+
+	rc = cam_jpeg_dma_register_cpas(&jpeg_dma_dev->soc_info,
+		core_info, jpeg_dma_dev_intf->hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to register cpas %d", rc);
+		goto error_reg_cpas;
+	}
+	jpeg_dma_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&jpeg_dma_dev->hw_mutex);
+	spin_lock_init(&jpeg_dma_dev->hw_lock);
+	init_completion(&jpeg_dma_dev->hw_complete);
+
+	CAM_DBG(CAM_JPEG, "hwidx %d", jpeg_dma_dev_intf->hw_idx);
+
+	return rc;
+
+error_reg_cpas:
+	cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+error_match_dev:
+	kfree(jpeg_dma_dev->core_info);
+error_alloc_core:
+	kfree(jpeg_dma_dev);
+error_alloc_dev:
+	kfree(jpeg_dma_dev_intf);
+	return rc;
+}
+
+static const struct of_device_id cam_jpeg_dma_dt_match[] = {
+	{
+		.compatible = "qcom,cam_jpeg_dma",
+		.data = &cam_jpeg_dma_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_dma_dt_match);
+
+static struct platform_driver cam_jpeg_dma_driver = {
+	.probe = cam_jpeg_dma_probe,
+	.remove = cam_jpeg_dma_remove,
+	.driver = {
+		.name = "cam-jpeg-dma",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_dma_dt_match,
+	},
+};
+
+static int __init cam_jpeg_dma_init_module(void)
+{
+	return platform_driver_register(&cam_jpeg_dma_driver);
+}
+
+static void __exit cam_jpeg_dma_exit_module(void)
+{
+	platform_driver_unregister(&cam_jpeg_dma_driver);
+}
+
+module_init(cam_jpeg_dma_init_module);
+module_exit(cam_jpeg_dma_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_DMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
new file mode 100644
index 0000000..efc161b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_dma_irq_handler, void *irq_data)
+{
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		jpeg_dma_irq_handler,
+		irq_data);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
new file mode 100644
index 0000000..bc9bed8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_DMA_SOC_H_
+#define _CAM_JPEG_DMA_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_dma_irq_handler, void *irq_data);
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_JPEG_DMA_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
new file mode 100644
index 0000000..b046a7f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
@@ -0,0 +1,11 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_dev.o jpeg_enc_core.o jpeg_enc_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
new file mode 100644
index 0000000..25405cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
@@ -0,0 +1,348 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_enc_hw_intf.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+
+#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000
+#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
+
+#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
+#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b
+
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF      (0x1<<19)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR     (0x1<<20)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR   (0x1<<21)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW    (0x1<<23)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM       (0x1<<24)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ   (0x1<<25)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM       (0x1<<26)
+#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK        (0x1<<29)
+
+#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \
+		CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \
+		CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK
+#define CAM_JPEG_HW_MASK_COMP_ERR \
+		(CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \
+		CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK)
+
+#define CAM_JPEG_HW_IRQ_IS_FRAME_DONE(jpeg_irq_status) \
+	(jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_FRAMEDONE)
+#define CAM_JPEG_HW_IRQ_IS_RESET_ACK(jpeg_irq_status) \
+	(jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_RESET_ACK)
+#define CAM_JPEG_HW_IRQ_IS_ERR(jpeg_irq_status) \
+	(jpeg_irq_status & CAM_JPEG_HW_MASK_COMP_ERR)
+
+#define CAM_JPEG_ENC_RESET_TIMEOUT msecs_to_jiffies(500)
+
+int cam_jpeg_enc_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_jpeg_cpas_vote cpas_vote;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.compressed_bw = JPEG_TURBO_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = JPEG_TURBO_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas start failed: %d", rc);
+
+	rc = cam_jpeg_enc_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "soc enable failed %d", rc);
+		cam_cpas_stop(core_info->cpas_handle);
+	}
+
+	return rc;
+}
+
+int cam_jpeg_enc_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_jpeg_enc_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
+
+	return 0;
+}
+
+irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	uint32_t irq_status = 0;
+	uint32_t encoded_size = 0;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return IRQ_HANDLED;
+	}
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	irq_status = cam_io_r_mb(mem_base +
+		core_info->jpeg_enc_hw_info->int_status);
+
+	cam_io_w_mb(irq_status,
+		soc_info->reg_map[0].mem_base +
+		core_info->jpeg_enc_hw_info->int_clr);
+
+	CAM_DBG(CAM_JPEG, "irq_num %d  irq_status = %x , core_state %d",
+		irq_num, irq_status, core_info->core_state);
+	if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status)) {
+		if (core_info->core_state == CAM_JPEG_ENC_CORE_READY) {
+			encoded_size = cam_io_r_mb(mem_base + 0x180);
+			if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+				core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+					encoded_size,
+					core_info->irq_cb.data);
+			} else {
+				CAM_ERR(CAM_JPEG, "unexpected done");
+			}
+		}
+
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	}
+	if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status)) {
+		if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+			core_info->core_state = CAM_JPEG_ENC_CORE_READY;
+			complete(&jpeg_enc_dev->hw_complete);
+		} else {
+			CAM_ERR(CAM_JPEG, "unexpected reset irq");
+		}
+	}
+	/* Unexpected/unintended HW interrupt */
+	if (CAM_JPEG_HW_IRQ_IS_ERR(irq_status)) {
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+		CAM_ERR_RATE_LIMIT(CAM_JPEG,
+			"error irq_num %d  irq_status = %x , core_state %d",
+			irq_num, irq_status, core_info->core_state);
+
+		if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+				-1,
+				core_info->irq_cb.data);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_jpeg_enc_reset_hw(void *data,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+	unsigned long rem_jiffies;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	/* maskdisable.clrirq.maskenable.resetcmd */
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+		CAM_ERR(CAM_JPEG, "already resetting");
+		return 0;
+	}
+
+	reinit_completion(&jpeg_enc_dev->hw_complete);
+
+	core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING;
+
+	cam_io_w_mb(0x00000000, mem_base + hw_info->int_mask);
+	cam_io_w_mb(0xFFFFFFFF, mem_base + hw_info->int_clr);
+	cam_io_w_mb(0xFFFFFFFF, mem_base + hw_info->int_mask);
+	cam_io_w_mb(0x00032093, mem_base + hw_info->reset_cmd);
+
+	rem_jiffies = wait_for_completion_timeout(&jpeg_enc_dev->hw_complete,
+		CAM_JPEG_ENC_RESET_TIMEOUT);
+	if (!rem_jiffies) {
+		CAM_ERR(CAM_JPEG, "error Reset Timeout");
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	}
+
+	return 0;
+}
+
+int cam_jpeg_enc_start_hw(void *data,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	if (core_info->core_state != CAM_JPEG_ENC_CORE_READY) {
+		CAM_ERR(CAM_JPEG, "Error not ready");
+		return -EINVAL;
+	}
+
+	cam_io_w_mb(0x00000001, mem_base + 0x00000010);
+
+	return 0;
+}
+
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_JPEG_ENC_CMD_MAX) {
+		CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+
+	switch (cmd_type) {
+	case CAM_JPEG_ENC_CMD_SET_IRQ_CB:
+	{
+		struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_JPEG, "cmd args NULL");
+			return -EINVAL;
+		}
+		if (irq_cb->b_set_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb =
+				irq_cb->jpeg_hw_mgr_cb;
+			core_info->irq_cb.data = irq_cb->data;
+		} else {
+			core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+			core_info->irq_cb.data = NULL;
+		}
+		rc = 0;
+		break;
+	}
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	if (rc)
+		CAM_ERR(CAM_JPEG, "error cmdtype %d rc = %d", cmd_type, rc);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
new file mode 100644
index 0000000..6ae4cdc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CAM_JPEG_ENC_CORE_H
+#define CAM_JPEG_ENC_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+struct cam_jpeg_enc_device_hw_info {
+	uint32_t hw_version;
+	uint32_t int_status;
+	uint32_t int_clr;
+	uint32_t int_mask;
+	uint32_t reset_cmd;
+};
+
+struct cam_jpeg_enc_set_irq_cb {
+	int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status,
+		int32_t result_size, void *data);
+	void *data;
+};
+
+enum cam_jpeg_enc_core_state {
+	CAM_JPEG_ENC_CORE_NOT_READY,
+	CAM_JPEG_ENC_CORE_READY,
+	CAM_JPEG_ENC_CORE_RESETTING,
+	CAM_JPEG_ENC_CORE_STATE_MAX,
+};
+
+struct cam_jpeg_enc_device_core_info {
+	enum cam_jpeg_enc_core_state core_state;
+	struct cam_jpeg_enc_device_hw_info *jpeg_enc_hw_info;
+	uint32_t cpas_handle;
+	struct cam_jpeg_enc_set_irq_cb irq_cb;
+};
+
+int cam_jpeg_enc_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_start_hw(void *device_priv,
+	void *start_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_reset_hw(void *device_priv,
+	void *reset_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data);
+
+#endif /* CAM_JPEG_ENC_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
new file mode 100644
index 0000000..5dd1e1f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_hw_info = {
+	.int_clr = 0x1c,
+	.int_status = 0x20,
+	.int_mask = 0x18,
+	.reset_cmd = 0x8,
+	.hw_version = 0x0,
+};
+EXPORT_SYMBOL(cam_jpeg_enc_hw_info);
+
+static int cam_jpeg_enc_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_jpeg_enc_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "jpeg-enc",
+		sizeof("jpeg-enc"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+static int cam_jpeg_enc_unregister_cpas(
+	struct cam_jpeg_enc_device_core_info *core_info)
+{
+	int rc;
+
+	rc = cam_cpas_unregister_client(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+	core_info->cpas_handle = 0;
+
+	return rc;
+}
+
+static int cam_jpeg_enc_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_enc_dev = NULL;
+	struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	jpeg_enc_dev_intf = platform_get_drvdata(pdev);
+	if (!jpeg_enc_dev_intf) {
+		CAM_ERR(CAM_JPEG, "error No data in pdev");
+		return -EINVAL;
+	}
+
+	jpeg_enc_dev = jpeg_enc_dev_intf->hw_priv;
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "error HW data is NULL");
+		rc = -ENODEV;
+		goto free_jpeg_hw_intf;
+	}
+
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_JPEG, "error core data NULL");
+		goto deinit_soc;
+	}
+
+	rc = cam_jpeg_enc_unregister_cpas(core_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "failed to unregister cpas %d", rc);
+
+	kfree(core_info);
+
+deinit_soc:
+	rc = cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&jpeg_enc_dev->hw_mutex);
+	kfree(jpeg_enc_dev);
+
+free_jpeg_hw_intf:
+	kfree(jpeg_enc_dev_intf);
+	return rc;
+}
+
+static int cam_jpeg_enc_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_enc_dev = NULL;
+	struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	int rc;
+
+	jpeg_enc_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!jpeg_enc_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &jpeg_enc_dev_intf->hw_idx);
+
+	jpeg_enc_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!jpeg_enc_dev) {
+		rc = -ENOMEM;
+		goto error_alloc_dev;
+	}
+	jpeg_enc_dev->soc_info.pdev = pdev;
+	jpeg_enc_dev_intf->hw_priv = jpeg_enc_dev;
+	jpeg_enc_dev_intf->hw_ops.init = cam_jpeg_enc_init_hw;
+	jpeg_enc_dev_intf->hw_ops.deinit = cam_jpeg_enc_deinit_hw;
+	jpeg_enc_dev_intf->hw_ops.start = cam_jpeg_enc_start_hw;
+	jpeg_enc_dev_intf->hw_ops.reset = cam_jpeg_enc_reset_hw;
+	jpeg_enc_dev_intf->hw_ops.process_cmd = cam_jpeg_enc_process_cmd;
+	jpeg_enc_dev_intf->hw_type = CAM_JPEG_DEV_ENC;
+
+	platform_set_drvdata(pdev, jpeg_enc_dev_intf);
+	jpeg_enc_dev->core_info =
+		kzalloc(sizeof(struct cam_jpeg_enc_device_core_info),
+			GFP_KERNEL);
+	if (!jpeg_enc_dev->core_info) {
+		rc = -ENOMEM;
+		goto error_alloc_core;
+	}
+	core_info = (struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_JPEG, "No jpeg_enc hardware info");
+		rc = -EINVAL;
+		goto error_match_dev;
+	}
+	hw_info = (struct cam_jpeg_enc_device_hw_info *)match_dev->data;
+	core_info->jpeg_enc_hw_info = hw_info;
+	core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+
+	rc = cam_jpeg_enc_init_soc_resources(&jpeg_enc_dev->soc_info,
+		cam_jpeg_enc_irq,
+		jpeg_enc_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to init_soc %d", rc);
+		goto error_match_dev;
+	}
+
+	rc = cam_jpeg_enc_register_cpas(&jpeg_enc_dev->soc_info,
+		core_info, jpeg_enc_dev_intf->hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to register cpas %d", rc);
+		goto error_reg_cpas;
+	}
+	jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&jpeg_enc_dev->hw_mutex);
+	spin_lock_init(&jpeg_enc_dev->hw_lock);
+	init_completion(&jpeg_enc_dev->hw_complete);
+
+	return rc;
+
+error_reg_cpas:
+	cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+error_match_dev:
+	kfree(jpeg_enc_dev->core_info);
+error_alloc_core:
+	kfree(jpeg_enc_dev);
+error_alloc_dev:
+	kfree(jpeg_enc_dev_intf);
+
+	return rc;
+}
+
+static const struct of_device_id cam_jpeg_enc_dt_match[] = {
+	{
+		.compatible = "qcom,cam_jpeg_enc",
+		.data = &cam_jpeg_enc_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_enc_dt_match);
+
+static struct platform_driver cam_jpeg_enc_driver = {
+	.probe = cam_jpeg_enc_probe,
+	.remove = cam_jpeg_enc_remove,
+	.driver = {
+		.name = "cam-jpeg-enc",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_enc_dt_match,
+	},
+};
+
+static int __init cam_jpeg_enc_init_module(void)
+{
+	return platform_driver_register(&cam_jpeg_enc_driver);
+}
+
+static void __exit cam_jpeg_enc_exit_module(void)
+{
+	platform_driver_unregister(&cam_jpeg_enc_driver);
+}
+
+module_init(cam_jpeg_enc_init_module);
+module_exit(cam_jpeg_enc_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_ENC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
new file mode 100644
index 0000000..3f450cd
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_enc_irq_handler, void *irq_data)
+{
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		jpeg_enc_irq_handler,
+		irq_data);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
new file mode 100644
index 0000000..a0485a2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_JPEG_ENC_SOC_H_
+#define _CAM_JPEG_ENC_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_enc_irq_handler, void *irq_data);
+
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_JPEG_ENC_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 2f9db97..c150244 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -10,14 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-MEM-MGR %s:%d " fmt, __func__, __LINE__
-
-#ifdef CONFIG_MEM_MGR_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
@@ -27,6 +19,7 @@
 #include "cam_req_mgr_util.h"
 #include "cam_mem_mgr.h"
 #include "cam_smmu_api.h"
+#include "cam_debug_util.h"
 
 static struct cam_mem_table tbl;
 
@@ -36,12 +29,12 @@
 {
 	*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
 	if (IS_ERR_OR_NULL((void *)*vaddr)) {
-		pr_err("kernel map fail");
+		CAM_ERR(CAM_CRM, "kernel map fail");
 		return -ENOSPC;
 	}
 
 	if (ion_handle_get_size(tbl.client, hdl, len)) {
-		pr_err("kernel get len failed");
+		CAM_ERR(CAM_CRM, "kernel get len failed");
 		ion_unmap_kernel(tbl.client, hdl);
 		return -ENOSPC;
 	}
@@ -69,7 +62,7 @@
 
 	tbl.client = msm_ion_client_create("camera_global_pool");
 	if (IS_ERR_OR_NULL(tbl.client)) {
-		pr_err("fail to create client\n");
+		CAM_ERR(CAM_CRM, "fail to create client");
 		rc = -EINVAL;
 	}
 
@@ -92,7 +85,7 @@
 
 	rc = cam_mem_util_client_create();
 	if (rc < 0) {
-		pr_err("fail to create ion client\n");
+		CAM_ERR(CAM_CRM, "fail to create ion client");
 		goto client_fail;
 	}
 
@@ -127,10 +120,12 @@
 	mutex_lock(&tbl.m_lock);
 	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
 		if (!tbl.bufq[i].active) {
-			CDBG("Buffer inactive at idx=%d, continuing\n", i);
+			CAM_DBG(CAM_CRM,
+				"Buffer inactive at idx=%d, continuing", i);
 			continue;
 		} else {
-			pr_err("Active buffer at idx=%d, possible leak\n", i);
+			CAM_ERR(CAM_CRM,
+				"Active buffer at idx=%d, possible leak", i);
 		}
 
 		mutex_lock(&tbl.bufq[i].q_lock);
@@ -221,7 +216,7 @@
 		iova_ptr,
 		len_ptr);
 	if (rc < 0)
-		pr_err("fail to get buf hdl :%d", buf_handle);
+		CAM_ERR(CAM_CRM, "fail to get buf hdl :%d", buf_handle);
 
 handle_mismatch:
 	mutex_unlock(&tbl.bufq[idx].q_lock);
@@ -255,7 +250,7 @@
 
 	ion_hdl = tbl.bufq[idx].i_hdl;
 	if (!ion_hdl) {
-		pr_err("Invalid ION handle\n");
+		CAM_ERR(CAM_CRM, "Invalid ION handle");
 		rc = -EINVAL;
 		goto exit_func;
 	}
@@ -310,7 +305,7 @@
 	rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
 		&ion_flag);
 	if (rc) {
-		pr_err("cache get flags failed %d\n", rc);
+		CAM_ERR(CAM_CRM, "cache get flags failed %d", rc);
 		goto fail;
 	}
 
@@ -326,7 +321,8 @@
 			ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
 			break;
 		default:
-			pr_err("invalid cache ops :%d", cmd->mem_cache_ops);
+			CAM_ERR(CAM_CRM,
+				"invalid cache ops :%d", cmd->mem_cache_ops);
 			rc = -EINVAL;
 			goto fail;
 		}
@@ -337,7 +333,7 @@
 				tbl.bufq[idx].len,
 				ion_cache_ops);
 		if (rc)
-			pr_err("cache operation failed %d\n", rc);
+			CAM_ERR(CAM_CRM, "cache operation failed %d", rc);
 	}
 fail:
 	mutex_unlock(&tbl.bufq[idx].q_lock);
@@ -360,7 +356,7 @@
 
 	*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
 	if (*fd < 0) {
-		pr_err("dma buf get fd fail");
+		CAM_ERR(CAM_CRM, "dma buf get fd fail");
 		rc = -EINVAL;
 		goto get_fd_fail;
 	}
@@ -404,19 +400,19 @@
 static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
 {
 	if (!cmd->flags) {
-		pr_err("Invalid flags\n");
+		CAM_ERR(CAM_CRM, "Invalid flags");
 		return -EINVAL;
 	}
 
 	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
-		pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+		CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
 			CAM_MEM_MMU_MAX_HANDLE);
 		return -EINVAL;
 	}
 
 	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
 		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
-		pr_err("Kernel mapping in secure mode not allowed");
+		CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
 		return -EINVAL;
 	}
 
@@ -426,24 +422,25 @@
 static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
 {
 	if (!cmd->flags) {
-		pr_err("Invalid flags\n");
+		CAM_ERR(CAM_CRM, "Invalid flags");
 		return -EINVAL;
 	}
 
 	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
-		pr_err("Num of mmu hdl exceeded maximum(%d)\n",
+		CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
 			CAM_MEM_MMU_MAX_HANDLE);
 		return -EINVAL;
 	}
 
 	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
 		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
-		pr_err("Kernel mapping in secure mode not allowed");
+		CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
 		return -EINVAL;
 	}
 
 	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
-		pr_err("Shared memory buffers are not allowed to be mapped\n");
+		CAM_ERR(CAM_CRM,
+			"Shared memory buffers are not allowed to be mapped");
 		return -EINVAL;
 	}
 
@@ -463,7 +460,7 @@
 	int dir = cam_mem_util_get_dma_dir(flags);
 
 	if (dir < 0) {
-		pr_err("fail to map DMA direction\n");
+		CAM_ERR(CAM_CRM, "fail to map DMA direction");
 		return dir;
 	}
 
@@ -476,7 +473,8 @@
 				len);
 
 			if (rc < 0) {
-				pr_err("Failed to securely map to smmu");
+				CAM_ERR(CAM_CRM,
+					"Failed to securely map to smmu");
 				goto multi_map_fail;
 			}
 		}
@@ -490,7 +488,7 @@
 				region);
 
 			if (rc < 0) {
-				pr_err("Failed to map to smmu");
+				CAM_ERR(CAM_CRM, "Failed to map to smmu");
 				goto multi_map_fail;
 			}
 		}
@@ -520,14 +518,14 @@
 	size_t len;
 
 	if (!cmd) {
-		pr_err(" Invalid argument\n");
+		CAM_ERR(CAM_CRM, "Invalid argument");
 		return -EINVAL;
 	}
 	len = cmd->len;
 
 	rc = cam_mem_util_check_flags(cmd);
 	if (rc) {
-		pr_err("Invalid flags: flags = %X\n", cmd->flags);
+		CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
 		return rc;
 	}
 
@@ -535,7 +533,7 @@
 		&ion_hdl,
 		&ion_fd);
 	if (rc) {
-		pr_err("Ion allocation failed\n");
+		CAM_ERR(CAM_CRM, "Ion allocation failed");
 		return rc;
 	}
 
@@ -591,7 +589,7 @@
 	cmd->out.fd = tbl.bufq[idx].fd;
 	cmd->out.vaddr = 0;
 
-	CDBG("buf handle: %x, fd: %d, len: %zu\n",
+	CAM_DBG(CAM_CRM, "buf handle: %x, fd: %d, len: %zu",
 		cmd->out.buf_handle, cmd->out.fd,
 		tbl.bufq[idx].len);
 
@@ -613,7 +611,7 @@
 	size_t len = 0;
 
 	if (!cmd || (cmd->fd < 0)) {
-		pr_err("Invalid argument\n");
+		CAM_ERR(CAM_CRM, "Invalid argument");
 		return -EINVAL;
 	}
 
@@ -622,13 +620,13 @@
 
 	rc = cam_mem_util_check_map_flags(cmd);
 	if (rc) {
-		pr_err("Invalid flags: flags = %X\n", cmd->flags);
+		CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
 		return rc;
 	}
 
 	ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
 	if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
-		pr_err("Failed to import ion fd\n");
+		CAM_ERR(CAM_CRM, "Failed to import ion fd");
 		return -EINVAL;
 	}
 
@@ -690,7 +688,7 @@
 	int rc = -EINVAL;
 
 	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
-		pr_err("Incorrect index\n");
+		CAM_ERR(CAM_CRM, "Incorrect index");
 		return rc;
 	}
 
@@ -725,11 +723,11 @@
 	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
 
 	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
-		pr_err("Incorrect index\n");
+		CAM_ERR(CAM_CRM, "Incorrect index");
 		return -EINVAL;
 	}
 
-	CDBG("Flags = %X\n", tbl.bufq[idx].flags);
+	CAM_DBG(CAM_CRM, "Flags = %X", tbl.bufq[idx].flags);
 
 	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
 		if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
@@ -755,7 +753,8 @@
 	memset(tbl.bufq[idx].hdls, 0,
 		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
 
-	CDBG("Ion handle at idx = %d freeing = %pK, fd = %d, imported %d\n",
+	CAM_DBG(CAM_CRM,
+		"Ion handle at idx = %d freeing = %pK, fd = %d, imported %d",
 		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
 		tbl.bufq[idx].is_imported);
 
@@ -780,27 +779,28 @@
 	int rc;
 
 	if (!cmd) {
-		pr_err("Invalid argument\n");
+		CAM_ERR(CAM_CRM, "Invalid argument");
 		return -EINVAL;
 	}
 
 	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
 	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
-		pr_err("Incorrect index extracted from mem handle\n");
+		CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
 		return -EINVAL;
 	}
 
 	if (!tbl.bufq[idx].active) {
-		pr_err("Released buffer state should be active\n");
+		CAM_ERR(CAM_CRM, "Released buffer state should be active");
 		return -EINVAL;
 	}
 
 	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
-		pr_err("Released buf handle not matching within table\n");
+		CAM_ERR(CAM_CRM,
+			"Released buf handle not matching within table");
 		return -EINVAL;
 	}
 
-	CDBG("Releasing hdl = %u\n", cmd->buf_handle);
+	CAM_DBG(CAM_CRM, "Releasing hdl = %u", cmd->buf_handle);
 	rc = cam_mem_util_unmap(idx);
 
 	return rc;
@@ -824,14 +824,14 @@
 	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
 
 	if (!inp || !out) {
-		pr_err("Invalid params\n");
+		CAM_ERR(CAM_CRM, "Invalid params");
 		return -EINVAL;
 	}
 
 	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
 		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
 		inp->flags & CAM_MEM_FLAG_CACHE)) {
-		pr_err("Invalid flags for request mem\n");
+		CAM_ERR(CAM_CRM, "Invalid flags for request mem");
 		return -EINVAL;
 	}
 
@@ -850,20 +850,20 @@
 		&ion_fd);
 
 	if (rc) {
-		pr_err("ION alloc failed for shared buffer\n");
+		CAM_ERR(CAM_CRM, "ION alloc failed for shared buffer");
 		goto ion_fail;
 	} else {
-		CDBG("Got ION fd = %d, hdl = %pK\n", ion_fd, hdl);
+		CAM_DBG(CAM_CRM, "Got ION fd = %d, hdl = %pK", ion_fd, hdl);
 	}
 
 	rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
 	if (rc) {
-		pr_err("Failed to get kernel vaddr\n");
+		CAM_ERR(CAM_CRM, "Failed to get kernel vaddr");
 		goto map_fail;
 	}
 
 	if (!inp->smmu_hdl) {
-		pr_err("Invalid SMMU handle\n");
+		CAM_ERR(CAM_CRM, "Invalid SMMU handle");
 		rc = -EINVAL;
 		goto smmu_fail;
 	}
@@ -884,7 +884,7 @@
 		region);
 
 	if (rc < 0) {
-		pr_err("SMMU mapping failed\n");
+		CAM_ERR(CAM_CRM, "SMMU mapping failed");
 		goto smmu_fail;
 	}
 
@@ -941,27 +941,28 @@
 	int rc;
 
 	if (!inp) {
-		pr_err("Invalid argument\n");
+		CAM_ERR(CAM_CRM, "Invalid argument");
 		return -EINVAL;
 	}
 
 	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
 	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
-		pr_err("Incorrect index extracted from mem handle\n");
+		CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
 		return -EINVAL;
 	}
 
 	if (!tbl.bufq[idx].active) {
-		pr_err("Released buffer state should be active\n");
+		CAM_ERR(CAM_CRM, "Released buffer state should be active");
 		return -EINVAL;
 	}
 
 	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
-		pr_err("Released buf handle not matching within table\n");
+		CAM_ERR(CAM_CRM,
+			"Released buf handle not matching within table");
 		return -EINVAL;
 	}
 
-	CDBG("Releasing hdl = %X\n", inp->mem_handle);
+	CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
 	rc = cam_mem_util_unmap(idx);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index f5dfcf2..3fd42f7 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -20,6 +20,7 @@
 #include "cam_req_mgr_workq.h"
 #include "cam_req_mgr_debug.h"
 #include "cam_trace.h"
+#include "cam_debug_util.h"
 
 static struct cam_req_mgr_core_device *g_crm_core_dev;
 
@@ -59,24 +60,25 @@
 	struct cam_req_mgr_req_tbl   *req_tbl = req->l_tbl;
 
 	if (!in_q || !req_tbl) {
-		CRM_WARN("NULL pointer %pK %pK", in_q, req_tbl);
+		CAM_WARN(CAM_CRM, "NULL pointer %pK %pK", in_q, req_tbl);
 		return -EINVAL;
 	}
-	CRM_DBG("in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
+	CAM_DBG(CAM_CRM, "in_q %pK %pK %d", in_q, req_tbl, req_tbl->num_slots);
 	mutex_lock(&req->lock);
 	for (i = 0; i < in_q->num_slots; i++) {
-		CRM_DBG("IN_Q %d: idx %d, red_id %lld", i,
+		CAM_DBG(CAM_CRM, "IN_Q %d: idx %d, req_id %lld", i,
 			in_q->slot[i].idx, CRM_GET_REQ_ID(in_q, i));
 	}
 
 	while (req_tbl != NULL) {
 		for (i = 0; i < req_tbl->num_slots; i++) {
-			CRM_DBG("idx= %d, map= %x, state= %d",
+			CAM_DBG(CAM_CRM, "idx= %d, map= %x, state= %d",
 				req_tbl->slot[i].idx,
 				req_tbl->slot[i].req_ready_map,
 				req_tbl->slot[i].state);
 		}
-		CRM_DBG("TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
+		CAM_DBG(CAM_CRM,
+			"TBL:id= %d, pd=%d cnt=%d mask=%x skip=%d num_slt= %d",
 			req_tbl->id, req_tbl->pd, req_tbl->dev_count,
 			req_tbl->dev_mask, req_tbl->skip_traverse,
 			req_tbl->num_slots);
@@ -166,7 +168,7 @@
 	struct cam_req_mgr_apply    *apply_data;
 
 	if (!traverse_data->tbl || !traverse_data->apply_data) {
-		CRM_ERR("NULL pointer %pK %pK",
+		CAM_ERR(CAM_CRM, "NULL pointer %pK %pK",
 			traverse_data->tbl, traverse_data->apply_data);
 		traverse_data->result = 0;
 		return -EINVAL;
@@ -174,7 +176,7 @@
 
 	tbl = traverse_data->tbl;
 	apply_data = traverse_data->apply_data;
-	CRM_DBG("Enter pd %d idx %d state %d skip %d status %d",
+	CAM_DBG(CAM_CRM, "Enter pd %d idx %d state %d skip %d status %d",
 		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
 		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
 
@@ -229,7 +231,7 @@
 	in_q->slot[idx].req_id = -1;
 	in_q->slot[idx].skip_idx = 1;
 	in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
-	CRM_DBG("SET IDX SKIP on slot= %d", idx);
+	CAM_DBG(CAM_CRM, "SET IDX SKIP on slot= %d", idx);
 }
 
 /**
@@ -247,7 +249,7 @@
 		return;
 	do {
 		tbl->id = req->num_tbl++;
-		CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+		CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
 			tbl->id, tbl->pd, tbl->skip_traverse,
 			tbl->pd_delta);
 		tbl = tbl->next;
@@ -276,7 +278,7 @@
 	max_pd = tbl->pd;
 	do {
 		tbl->skip_traverse = max_pd - tbl->pd;
-		CRM_DBG("%d: pd %d skip_traverse %d delta %d",
+		CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
 			tbl->id, tbl->pd, tbl->skip_traverse,
 			tbl->pd_delta);
 		tbl = tbl->next;
@@ -299,7 +301,7 @@
 	struct cam_req_mgr_req_queue *in_q = link->req.in_q;
 
 	slot = &in_q->slot[idx];
-	CRM_DBG("RESET: idx: %d: slot->status %d", idx, slot->status);
+	CAM_DBG(CAM_CRM, "RESET: idx: %d: slot->status %d", idx, slot->status);
 
 	/* Check if CSL has already pushed new request*/
 	if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
@@ -313,7 +315,7 @@
 
 	/* Reset all pd table slot */
 	while (tbl != NULL) {
-		CRM_DBG("pd: %d: idx %d state %d",
+		CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
 			tbl->pd, idx, tbl->slot[idx].state);
 		tbl->slot[idx].req_ready_map = 0;
 		tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
@@ -339,13 +341,14 @@
 	__cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
 	slot = &in_q->slot[idx];
 
-	CRM_DBG("idx: %d: slot->status %d", idx, slot->status);
+	CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);
 
 	/* Check if there is new req from CSL, if not complete req */
 	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
 		__cam_req_mgr_in_q_skip_idx(in_q, idx);
 		if (in_q->wr_idx != idx)
-			CRM_WARN("CHECK here wr %d, rd %d", in_q->wr_idx, idx);
+			CAM_WARN(CAM_CRM,
+				"CHECK here wr %d, rd %d", in_q->wr_idx, idx);
 		__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
 	}
 }
@@ -377,13 +380,13 @@
 		if (dev) {
 			pd = dev->dev_info.p_delay;
 			if (pd >= CAM_PIPELINE_DELAY_MAX) {
-				CRM_WARN("pd %d greater than max",
+				CAM_WARN(CAM_CRM, "pd %d greater than max",
 					pd);
 				continue;
 			}
 			if (link->req.apply_data[pd].skip_idx ||
 				link->req.apply_data[pd].req_id < 0) {
-				CRM_DBG("skip %d req_id %lld",
+				CAM_DBG(CAM_CRM, "skip %d req_id %lld",
 					link->req.apply_data[pd].skip_idx,
 					link->req.apply_data[pd].req_id);
 				continue;
@@ -394,7 +397,10 @@
 			idx = link->req.apply_data[pd].idx;
 			apply_req.report_if_bubble =
 				in_q->slot[idx].recover;
-			CRM_DBG("SEND: pd %d req_id %lld",
+
+			trace_cam_req_mgr_apply_request(link, &apply_req, dev);
+
+			CAM_DBG(CAM_CRM, "SEND: pd %d req_id %lld",
 				pd, apply_req.request_id);
 			if (dev->ops && dev->ops->apply_req) {
 				rc = dev->ops->apply_req(&apply_req);
@@ -404,7 +410,7 @@
 		}
 	}
 	if (rc < 0) {
-		CRM_ERR("APPLY FAILED pd %d req_id %lld",
+		CAM_ERR(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
 			dev->dev_info.p_delay, apply_req.request_id);
 		/* Apply req failed notify already applied devs */
 		for (; i >= 0; i--) {
@@ -457,11 +463,12 @@
 	 */
 
 	rc = __cam_req_mgr_traverse(&traverse_data);
-	CRM_DBG("SOF: idx %d result %x pd_mask %x rc %d",
+	CAM_DBG(CAM_CRM, "SOF: idx %d result %x pd_mask %x rc %d",
 		idx, traverse_data.result, link->pd_mask, rc);
 
 	if (!rc && traverse_data.result == link->pd_mask) {
-		CRM_DBG("APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
+		CAM_DBG(CAM_CRM,
+			"APPLY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
 			link->link_hdl, idx,
 			apply_data[2].req_id, apply_data[1].req_id,
 			apply_data[0].req_id);
@@ -498,12 +505,12 @@
 	 * - if in applied_state, somthign wrong.
 	 * - if in no_req state, no new req
 	 */
-	CRM_DBG("idx %d req_status %d",
+	CAM_DBG(CAM_CRM, "idx %d req_status %d",
 		in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
 
 	slot = &in_q->slot[in_q->rd_idx];
 	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
-		CRM_DBG("No Pending req");
+		CAM_DBG(CAM_CRM, "No Pending req");
 		return 0;
 	}
 
@@ -517,7 +524,8 @@
 			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
 
 			if (link->state == CAM_CRM_LINK_STATE_ERR) {
-				CRM_WARN("Err recovery done idx %d status %d",
+				CAM_WARN(CAM_CRM,
+					"Err recovery done idx %d status %d",
 					in_q->rd_idx,
 					in_q->slot[in_q->rd_idx].status);
 				mutex_lock(&link->lock);
@@ -553,7 +561,8 @@
 			 *   don't expect to enter here.
 			 * @TODO: gracefully handle if recovery fails.
 			 */
-			CRM_ERR("FATAL recovery cant finish idx %d status %d",
+			CAM_ERR(CAM_CRM,
+				"FATAL recovery cant finish idx %d status %d",
 				in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
 			rc = -EPERM;
 		}
@@ -601,7 +610,7 @@
 		tbl->next = new_tbl;
 		tbl->pd_delta = tbl->pd - new_tbl->pd;
 	}
-	CRM_DBG("added pd %d tbl to link delta %d", new_tbl->pd,
+	CAM_DBG(CAM_CRM, "added pd %d tbl to link delta %d", new_tbl->pd,
 		new_tbl->pd_delta);
 }
 
@@ -620,7 +629,7 @@
 		kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
 	if (tbl != NULL) {
 		tbl->num_slots = MAX_REQ_SLOTS;
-		CRM_DBG("pd= %d slots= %d", delay, tbl->num_slots);
+		CAM_DBG(CAM_CRM, "pd= %d slots= %d", delay, tbl->num_slots);
 	}
 
 	return tbl;
@@ -637,7 +646,7 @@
 {
 	struct cam_req_mgr_req_tbl  *tbl = *l_tbl, *temp;
 
-	CRM_DBG("*l_tbl %pK", tbl);
+	CAM_DBG(CAM_CRM, "*l_tbl %pK", tbl);
 	while (tbl != NULL) {
 		temp = tbl->next;
 		kfree(tbl);
@@ -666,7 +675,7 @@
 	for (i = 0; i < in_q->num_slots; i++) {
 		slot = &in_q->slot[idx];
 		if (slot->req_id == req_id) {
-			CRM_DBG("req %lld found at %d %d status %d",
+			CAM_DBG(CAM_CRM, "req %lld found at %d %d status %d",
 				req_id, idx, slot->idx,
 				slot->status);
 			break;
@@ -694,7 +703,7 @@
 	struct cam_req_mgr_req_queue *in_q = req->in_q;
 
 	if (!in_q) {
-		CRM_ERR("NULL in_q");
+		CAM_ERR(CAM_CRM, "NULL in_q");
 		return -EINVAL;
 	}
 
@@ -729,7 +738,7 @@
 	struct cam_req_mgr_req_queue *in_q = req->in_q;
 
 	if (!in_q) {
-		CRM_ERR("NULL in_q");
+		CAM_ERR(CAM_CRM, "NULL in_q");
 		return -EINVAL;
 	}
 
@@ -758,11 +767,11 @@
 	struct cam_req_mgr_core_link *link = NULL;
 
 	if (!timer) {
-		CRM_ERR("NULL timer");
+		CAM_ERR(CAM_CRM, "NULL timer");
 		return;
 	}
 	link = (struct cam_req_mgr_core_link *)timer->parent;
-	CRM_ERR("SOF freeze for link %x", link->link_hdl);
+	CAM_ERR(CAM_CRM, "SOF freeze for link %x", link->link_hdl);
 }
 
 /**
@@ -859,12 +868,12 @@
 	struct cam_req_mgr_req_queue *in_q;
 
 	if (!session || !g_crm_core_dev) {
-		CRM_ERR("NULL session/core_dev ptr");
+		CAM_ERR(CAM_CRM, "NULL session/core_dev ptr");
 		return NULL;
 	}
 
 	if (session->num_links >= MAX_LINKS_PER_SESSION) {
-		CRM_ERR("Reached max links %d per session limit %d",
+		CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
 			session->num_links, MAX_LINKS_PER_SESSION);
 		return NULL;
 	}
@@ -872,7 +881,7 @@
 	link = (struct cam_req_mgr_core_link *)
 		kzalloc(sizeof(struct cam_req_mgr_core_link), GFP_KERNEL);
 	if (!link) {
-		CRM_ERR("failed to create link, no mem");
+		CAM_ERR(CAM_CRM, "failed to create link, no mem");
 		return NULL;
 	}
 	in_q = &session->in_q;
@@ -893,7 +902,7 @@
 	mutex_lock(&session->lock);
 	session->links[session->num_links] = link;
 	session->num_links++;
-	CRM_DBG("Active session links (%d)",
+	CAM_DBG(CAM_CRM, "Active session links (%d)",
 		session->num_links);
 	mutex_unlock(&session->lock);
 
@@ -915,14 +924,14 @@
 	int32_t   i = 0;
 
 	if (!session || !*link) {
-		CRM_ERR("NULL session/link ptr %pK %pK",
+		CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
 			session, *link);
 		return;
 	}
 
 	mutex_lock(&session->lock);
 	if (!session->num_links)
-		CRM_WARN("No active link or invalid state %d",
+		CAM_WARN(CAM_CRM, "No active link or invalid state %d",
 			session->num_links);
 	else {
 		for (i = 0; i < session->num_links; i++) {
@@ -930,7 +939,7 @@
 				session->links[i] = NULL;
 		}
 		session->num_links--;
-		CRM_DBG("Active session links (%d)",
+		CAM_DBG(CAM_CRM, "Active session links (%d)",
 			session->num_links);
 	}
 	kfree(*link);
@@ -959,7 +968,7 @@
 	struct cam_req_mgr_req_queue        *in_q = NULL;
 
 	if (!data || !priv) {
-		CRM_ERR("input args NULL %pK %pK", data, priv);
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
 		rc = -EINVAL;
 		goto end;
 	}
@@ -994,21 +1003,21 @@
 	struct crm_task_payload             *task_data = NULL;
 
 	if (!data || !priv) {
-		CRM_ERR("input args NULL %pK %pK", data, priv);
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
 		rc = -EINVAL;
 		goto end;
 	}
 	link = (struct cam_req_mgr_core_link *)priv;
 	task_data = (struct crm_task_payload *)data;
 	flush_info  = (struct cam_req_mgr_flush_info *)&task_data->u;
-	CRM_DBG("link_hdl %x req_id %lld type %d",
+	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld type %d",
 		flush_info->link_hdl,
 		flush_info->req_id,
 		flush_info->flush_type);
 
 	in_q = link->req.in_q;
 
-	trace_cam_flush_req(flush_info);
+	trace_cam_flush_req(link, flush_info);
 
 	mutex_lock(&link->req.lock);
 	if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
@@ -1024,15 +1033,16 @@
 		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
 		idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
 		if (idx < 0) {
-			CRM_ERR("req_id %lld not found in input queue",
+			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
 			flush_info->req_id);
 		} else {
-			CRM_DBG("req_id %lld found at idx %d",
+			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
 				flush_info->req_id, idx);
 			slot = &in_q->slot[idx];
 			if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
 				slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
-				CRM_WARN("req_id %lld can not be cancelled",
+				CAM_WARN(CAM_CRM,
+					"req_id %lld can not be cancelled",
 					flush_info->req_id);
 				mutex_unlock(&link->req.lock);
 				return -EINVAL;
@@ -1078,14 +1088,14 @@
 	struct crm_task_payload          *task_data = NULL;
 
 	if (!data || !priv) {
-		CRM_ERR("input args NULL %pK %pK", data, priv);
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
 		rc = -EINVAL;
 		goto end;
 	}
 	link = (struct cam_req_mgr_core_link *)priv;
 	task_data = (struct crm_task_payload *)data;
 	sched_req  = (struct cam_req_mgr_sched_request *)&task_data->u;
-	CRM_DBG("link_hdl %x req_id %lld",
+	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld",
 		sched_req->link_hdl,
 		sched_req->req_id);
 
@@ -1096,9 +1106,9 @@
 
 	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
 		slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
-		CRM_WARN("in_q overwrite %d", slot->status);
+		CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status);
 
-	CRM_DBG("sched_req %lld at slot %d",
+	CAM_DBG(CAM_CRM, "sched_req %lld at slot %d",
 		sched_req->req_id, in_q->wr_idx);
 
 	slot->status = CRM_SLOT_STATUS_REQ_ADDED;
@@ -1133,7 +1143,7 @@
 	struct crm_task_payload             *task_data = NULL;
 
 	if (!data || !priv) {
-		CRM_ERR("input args NULL %pK %pK", data, priv);
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1150,7 +1160,7 @@
 		}
 	}
 	if (!tbl) {
-		CRM_ERR("dev_hdl not found %x, %x %x",
+		CAM_ERR(CAM_CRM, "dev_hdl not found %x, %x %x",
 			add_req->dev_hdl,
 			link->l_dev[0].dev_hdl,
 			link->l_dev[1].dev_hdl);
@@ -1168,7 +1178,7 @@
 	mutex_lock(&link->req.lock);
 	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
 	if (idx < 0) {
-		CRM_ERR("req %lld not found in in_q", add_req->req_id);
+		CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
 		rc = -EBADSLT;
 		mutex_unlock(&link->req.lock);
 		goto end;
@@ -1176,19 +1186,21 @@
 	slot = &tbl->slot[idx];
 	if (slot->state != CRM_REQ_STATE_PENDING &&
 		slot->state != CRM_REQ_STATE_EMPTY) {
-		CRM_WARN("Unexpected state %d for slot %d map %x",
+		CAM_WARN(CAM_CRM, "Unexpected state %d for slot %d map %x",
 			slot->state, idx, slot->req_ready_map);
 	}
 
 	slot->state = CRM_REQ_STATE_PENDING;
 	slot->req_ready_map |= (1 << device->dev_bit);
 
-	CRM_DBG("idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+	CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
 		idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
 		slot->req_ready_map);
 
+	trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);
+
 	if (slot->req_ready_map == tbl->dev_mask) {
-		CRM_DBG("idx %d req_id %lld pd %d SLOT READY",
+		CAM_DBG(CAM_CRM, "idx %d req_id %lld pd %d SLOT READY",
 			idx, add_req->req_id, tbl->pd);
 		slot->state = CRM_REQ_STATE_READY;
 	}
@@ -1219,14 +1231,14 @@
 	struct crm_task_payload             *task_data = NULL;
 
 	if (!data || !priv) {
-		CRM_ERR("input args NULL %pK %pK", data, priv);
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
 		rc = -EINVAL;
 		goto end;
 	}
 	link = (struct cam_req_mgr_core_link *)priv;
 	task_data = (struct crm_task_payload *)data;
 	err_info  = (struct cam_req_mgr_error_notify *)&task_data->u;
-	CRM_DBG("link_hdl %x req_id %lld error %d",
+	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld error %d",
 		err_info->link_hdl,
 		err_info->req_id,
 		err_info->error);
@@ -1237,20 +1249,22 @@
 	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
 		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
 		if (idx < 0) {
-			CRM_ERR("req_id %lld not found in input queue",
+			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
 			err_info->req_id);
 		} else {
-			CRM_DBG("req_id %lld found at idx %d",
+			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
 				err_info->req_id, idx);
 			slot = &in_q->slot[idx];
 			if (!slot->recover) {
-				CRM_WARN("err recovery disabled req_id %lld",
+				CAM_WARN(CAM_CRM,
+					"err recovery disabled req_id %lld",
 					err_info->req_id);
 				mutex_unlock(&link->req.lock);
 				return 0;
 			} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
 			&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
-				CRM_WARN("req_id %lld can not be recovered %d",
+				CAM_WARN(CAM_CRM,
+					"req_id %lld can not be recovered %d",
 					err_info->req_id, slot->status);
 				mutex_unlock(&link->req.lock);
 				return -EINVAL;
@@ -1305,7 +1319,7 @@
 	struct crm_task_payload             *task_data = NULL;
 
 	if (!data || !priv) {
-		CRM_ERR("input args NULL %pK %pK", data, priv);
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1313,7 +1327,7 @@
 	task_data = (struct crm_task_payload *)data;
 	sof_data = (struct cam_req_mgr_sof_notify *)&task_data->u;
 
-	CRM_DBG("link_hdl %x frame_id %lld",
+	CAM_DBG(CAM_CRM, "link_hdl %x frame_id %lld",
 		sof_data->link_hdl,
 		sof_data->frame_id);
 
@@ -1324,11 +1338,11 @@
 	 * Check if current read index is in applied state, if yes make it free
 	 *    and increment read index to next slot.
 	 */
-	CRM_DBG("link_hdl %x curent idx %d req_status %d",
+	CAM_DBG(CAM_CRM, "link_hdl %x curent idx %d req_status %d",
 		link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
 
 	if (link->state == CAM_CRM_LINK_STATE_ERR)
-		CRM_WARN("Error recovery idx %d status %d",
+		CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
 			in_q->rd_idx,
 			in_q->slot[in_q->rd_idx].status);
 
@@ -1367,17 +1381,18 @@
 	struct crm_task_payload        *task_data;
 
 	if (!add_req) {
-		CRM_ERR("sof_data is NULL");
+		CAM_ERR(CAM_CRM, "sof_data is NULL");
 		rc = -EINVAL;
 		goto end;
 	}
 
-	CRM_DBG("E: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
+	CAM_DBG(CAM_CRM, "E: dev %x dev req %lld",
+		add_req->dev_hdl, add_req->req_id);
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(add_req->link_hdl);
 
 	if (!link) {
-		CRM_DBG("link ptr NULL %x", add_req->link_hdl);
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1385,14 +1400,14 @@
 	/* Validate if req id is present in input queue */
 	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
 	if (idx < 0) {
-		CRM_ERR("req %lld not found in in_q", add_req->req_id);
+		CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
 		rc = -ENOENT;
 		goto end;
 	}
 
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
-		CRM_ERR("no empty task dev %x req %lld",
+		CAM_ERR(CAM_CRM, "no empty task dev %x req %lld",
 			add_req->dev_hdl, add_req->req_id);
 		rc = -EBUSY;
 		goto end;
@@ -1406,7 +1421,8 @@
 	dev_req->dev_hdl = add_req->dev_hdl;
 	task->process_cb = &cam_req_mgr_process_add_req;
 	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
-	CRM_DBG("X: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
+	CAM_DBG(CAM_CRM, "X: dev %x dev req %lld",
+		add_req->dev_hdl, add_req->req_id);
 
 end:
 	return rc;
@@ -1431,7 +1447,7 @@
 	struct crm_task_payload         *task_data;
 
 	if (!err_info) {
-		CRM_ERR("err_info is NULL");
+		CAM_ERR(CAM_CRM, "err_info is NULL");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1439,7 +1455,7 @@
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(err_info->link_hdl);
 	if (!link) {
-		CRM_DBG("link ptr NULL %x", err_info->link_hdl);
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1447,7 +1463,7 @@
 	crm_timer_reset(link->watchdog);
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
-		CRM_ERR("no empty task req_id %lld", err_info->req_id);
+		CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id);
 		rc = -EBUSY;
 		goto end;
 	}
@@ -1485,7 +1501,7 @@
 	struct crm_task_payload         *task_data;
 
 	if (!sof_data) {
-		CRM_ERR("sof_data is NULL");
+		CAM_ERR(CAM_CRM, "sof_data is NULL");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1493,7 +1509,7 @@
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(sof_data->link_hdl);
 	if (!link) {
-		CRM_DBG("link ptr NULL %x", sof_data->link_hdl);
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", sof_data->link_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1501,7 +1517,8 @@
 	crm_timer_reset(link->watchdog);
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
-		CRM_ERR("no empty task frame %lld", sof_data->frame_id);
+		CAM_ERR(CAM_CRM, "no empty task frame %lld",
+			sof_data->frame_id);
 		rc = -EBUSY;
 		goto end;
 	}
@@ -1548,7 +1565,7 @@
 		return -EPERM;
 
 	mutex_init(&link->req.lock);
-	CRM_DBG("LOCK_DBG in_q lock %pK", &link->req.lock);
+	CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock);
 	link->req.num_tbl = 0;
 
 	rc = __cam_req_mgr_setup_in_q(&link->req);
@@ -1565,7 +1582,7 @@
 		if (!dev->ops ||
 			!dev->ops->get_dev_info ||
 			!dev->ops->link_setup) {
-			CRM_ERR("FATAL: device ops NULL");
+			CAM_ERR(CAM_CRM, "FATAL: device ops NULL");
 			rc = -ENXIO;
 			goto error;
 		}
@@ -1573,7 +1590,10 @@
 		dev->parent = (void *)link;
 		dev->dev_info.dev_hdl = dev->dev_hdl;
 		rc = dev->ops->get_dev_info(&dev->dev_info);
-		CRM_DBG("%x: connected: %s, id %d, delay %d",
+
+		trace_cam_req_mgr_connect_device(link, &dev->dev_info);
+
+		CAM_DBG(CAM_CRM, "%x: connected: %s, id %d, delay %d",
 			link_info->session_hdl, dev->dev_info.name,
 			dev->dev_info.dev_id, dev->dev_info.p_delay);
 		if (rc < 0 ||
@@ -1581,10 +1601,10 @@
 			CAM_PIPELINE_DELAY_MAX ||
 			dev->dev_info.p_delay <
 			CAM_PIPELINE_DELAY_0) {
-			CRM_ERR("get device info failed");
+			CAM_ERR(CAM_CRM, "get device info failed");
 			goto error;
 		} else {
-			CRM_DBG("%x: connected: %s, delay %d",
+			CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
 				link_info->session_hdl,
 				dev->dev_info.name,
 				dev->dev_info.p_delay);
@@ -1613,7 +1633,7 @@
 			pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
 				dev->dev_info.p_delay);
 			if (!pd_tbl) {
-				CRM_ERR("pd %d tbl not found",
+				CAM_ERR(CAM_CRM, "pd %d tbl not found",
 					dev->dev_info.p_delay);
 				rc = -ENXIO;
 				goto error;
@@ -1622,7 +1642,7 @@
 			pd_tbl = __cam_req_mgr_create_pd_tbl(
 				dev->dev_info.p_delay);
 			if (pd_tbl == NULL) {
-				CRM_ERR("create new pd tbl failed");
+				CAM_ERR(CAM_CRM, "create new pd tbl failed");
 				rc = -ENXIO;
 				goto error;
 			}
@@ -1669,7 +1689,7 @@
 	struct cam_req_mgr_core_session *cam_session = NULL;
 
 	if (!ses_info) {
-		CRM_DBG("NULL session info pointer");
+		CAM_DBG(CAM_CRM, "NULL session info pointer");
 		return -EINVAL;
 	}
 	mutex_lock(&g_crm_core_dev->crm_lock);
@@ -1682,7 +1702,8 @@
 
 	session_hdl = cam_create_session_hdl((void *)cam_session);
 	if (session_hdl < 0) {
-		CRM_ERR("unable to create session_hdl = %x", session_hdl);
+		CAM_ERR(CAM_CRM, "unable to create session_hdl = %x",
+			session_hdl);
 		rc = session_hdl;
 		kfree(cam_session);
 		goto end;
@@ -1690,7 +1711,7 @@
 	ses_info->session_hdl = session_hdl;
 
 	mutex_init(&cam_session->lock);
-	CRM_DBG("LOCK_DBG session lock %pK", &cam_session->lock);
+	CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK", &cam_session->lock);
 
 	mutex_lock(&cam_session->lock);
 	cam_session->session_hdl = session_hdl;
@@ -1709,7 +1730,7 @@
 	struct cam_req_mgr_core_session *cam_session = NULL;
 
 	if (!ses_info) {
-		CRM_DBG("NULL session info pointer");
+		CAM_DBG(CAM_CRM, "NULL session info pointer");
 		return -EINVAL;
 	}
 
@@ -1717,14 +1738,14 @@
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(ses_info->session_hdl);
 	if (!cam_session) {
-		CRM_ERR("failed to get session priv");
+		CAM_ERR(CAM_CRM, "failed to get session priv");
 		rc = -ENOENT;
 		goto end;
 
 	}
 	mutex_lock(&cam_session->lock);
 	if (cam_session->num_links) {
-		CRM_ERR("destroy session %x num_active_links %d",
+		CAM_ERR(CAM_CRM, "destroy session %x num_active_links %d",
 			ses_info->session_hdl,
 			cam_session->num_links);
 		/* @TODO : Go through active links and destroy ? */
@@ -1736,7 +1757,7 @@
 
 	rc = cam_destroy_session_hdl(ses_info->session_hdl);
 	if (rc < 0)
-		CRM_ERR("unable to destroy session_hdl = %x rc %d",
+		CAM_ERR(CAM_CRM, "unable to destroy session_hdl = %x rc %d",
 			ses_info->session_hdl, rc);
 
 end:
@@ -1753,11 +1774,12 @@
 	struct cam_req_mgr_core_link           *link;
 
 	if (!link_info) {
-		CRM_DBG("NULL pointer");
+		CAM_DBG(CAM_CRM, "NULL pointer");
 		return -EINVAL;
 	}
 	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
-		CRM_ERR("Invalid num devices %d", link_info->num_devices);
+		CAM_ERR(CAM_CRM, "Invalid num devices %d",
+			link_info->num_devices);
 		return -EINVAL;
 	}
 
@@ -1765,7 +1787,7 @@
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(link_info->session_hdl);
 	if (!cam_session) {
-		CRM_DBG("NULL pointer");
+		CAM_DBG(CAM_CRM, "NULL pointer");
 		return -EINVAL;
 	}
 
@@ -1774,11 +1796,11 @@
 	/* Allocate link struct and map it with session's request queue */
 	link = __cam_req_mgr_reserve_link(cam_session);
 	if (!link) {
-		CRM_ERR("failed to reserve new link");
+		CAM_ERR(CAM_CRM, "failed to reserve new link");
 		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
-	CRM_DBG("link reserved %pK %x", link, link->link_hdl);
+	CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);
 
 	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
 	root_dev.session_hdl = link_info->session_hdl;
@@ -1788,7 +1810,8 @@
 	/* Create unique dev handle for link */
 	link->link_hdl = cam_create_device_hdl(&root_dev);
 	if (link->link_hdl < 0) {
-		CRM_ERR("Insufficient memory to create new device handle");
+		CAM_ERR(CAM_CRM,
+			"Insufficient memory to create new device handle");
 		mutex_unlock(&link->lock);
 		rc = link->link_hdl;
 		goto link_hdl_fail;
@@ -1800,7 +1823,8 @@
 	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
 		link_info->num_devices);
 	if (rc < 0) {
-		CRM_ERR("Insufficient memory to create new crm subdevs");
+		CAM_ERR(CAM_CRM,
+			"Insufficient memory to create new crm subdevs");
 		goto create_subdev_failed;
 	}
 
@@ -1819,7 +1843,7 @@
 	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
 		&link->workq, CRM_WORKQ_USAGE_NON_IRQ);
 	if (rc < 0) {
-		CRM_ERR("FATAL: unable to create worker");
+		CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
 		__cam_req_mgr_destroy_link_info(link);
 		goto setup_failed;
 	}
@@ -1865,18 +1889,18 @@
 	struct cam_req_mgr_core_link    *link;
 
 	if (!unlink_info) {
-		CRM_ERR("NULL pointer");
+		CAM_ERR(CAM_CRM, "NULL pointer");
 		return -EINVAL;
 	}
 
 	mutex_lock(&g_crm_core_dev->crm_lock);
-	CRM_DBG("link_hdl %x", unlink_info->link_hdl);
+	CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl);
 
 	/* session hdl's priv data is cam session struct */
 	cam_session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(unlink_info->session_hdl);
 	if (!cam_session) {
-		CRM_ERR("NULL pointer");
+		CAM_ERR(CAM_CRM, "NULL pointer");
 		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
@@ -1884,7 +1908,7 @@
 	/* link hdl's priv data is core_link struct */
 	link = cam_get_device_priv(unlink_info->link_hdl);
 	if (!link) {
-		CRM_ERR("NULL pointer");
+		CAM_ERR(CAM_CRM, "NULL pointer");
 		mutex_unlock(&g_crm_core_dev->crm_lock);
 		return -EINVAL;
 	}
@@ -1908,7 +1932,7 @@
 	/* Destroy the link handle */
 	rc = cam_destroy_device_hdl(unlink_info->link_hdl);
 	if (rc < 0) {
-		CRM_ERR("error while destroying dev handle %d %x",
+		CAM_ERR(CAM_CRM, "error while destroying dev handle %d %x",
 			rc, link->link_hdl);
 	}
 
@@ -1929,7 +1953,7 @@
 	struct crm_task_payload           task_data;
 
 	if (!sched_req) {
-		CRM_ERR("csl_req is NULL");
+		CAM_ERR(CAM_CRM, "csl_req is NULL");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -1937,15 +1961,16 @@
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(sched_req->link_hdl);
 	if (!link) {
-		CRM_DBG("link ptr NULL %x", sched_req->link_hdl);
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
 	session = (struct cam_req_mgr_core_session *)link->parent;
 	if (!session) {
-		CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
+		CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
-	CRM_DBG("link %x req %lld", sched_req->link_hdl, sched_req->req_id);
+	CAM_DBG(CAM_CRM, "link %x req %lld",
+		sched_req->link_hdl, sched_req->req_id);
 
 	task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
 	sched = (struct cam_req_mgr_sched_request *)&task_data.u;
@@ -1960,7 +1985,8 @@
 
 	rc = cam_req_mgr_process_sched_req(link, &task_data);
 
-	CRM_DBG("DONE dev %x req %lld", sched_req->link_hdl, sched_req->req_id);
+	CAM_DBG(CAM_CRM, "DONE dev %x req %lld",
+		sched_req->link_hdl, sched_req->req_id);
 end:
 	return rc;
 }
@@ -1969,7 +1995,7 @@
 	struct cam_req_mgr_sync_mode *sync_links)
 {
 	if (!sync_links) {
-		CRM_ERR("NULL pointer");
+		CAM_ERR(CAM_CRM, "NULL pointer");
 		return -EINVAL;
 	}
 
@@ -1988,12 +2014,13 @@
 	struct cam_req_mgr_core_session  *session = NULL;
 
 	if (!flush_info) {
-		CRM_ERR("flush req is NULL");
+		CAM_ERR(CAM_CRM, "flush req is NULL");
 		rc = -EFAULT;
 		goto end;
 	}
 	if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
-		CRM_ERR("incorrect flush type %x", flush_info->flush_type);
+		CAM_ERR(CAM_CRM, "incorrect flush type %x",
+			flush_info->flush_type);
 		rc = -EINVAL;
 		goto end;
 	}
@@ -2002,12 +2029,12 @@
 	session = (struct cam_req_mgr_core_session *)
 		cam_get_device_priv(flush_info->session_hdl);
 	if (!session) {
-		CRM_ERR("Invalid session %x", flush_info->session_hdl);
+		CAM_ERR(CAM_CRM, "Invalid session %x", flush_info->session_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
 	if (session->num_links <= 0) {
-		CRM_WARN("No active links in session %x",
+		CAM_WARN(CAM_CRM, "No active links in session %x",
 		flush_info->session_hdl);
 		goto end;
 	}
@@ -2015,7 +2042,7 @@
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(flush_info->link_hdl);
 	if (!link) {
-		CRM_DBG("link ptr NULL %x", flush_info->link_hdl);
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", flush_info->link_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
@@ -2047,10 +2074,10 @@
 
 int cam_req_mgr_core_device_init(void)
 {
-	CRM_DBG("Enter g_crm_core_dev %pK", g_crm_core_dev);
+	CAM_DBG(CAM_CRM, "Enter g_crm_core_dev %pK", g_crm_core_dev);
 
 	if (g_crm_core_dev) {
-		CRM_WARN("core device is already initialized");
+		CAM_WARN(CAM_CRM, "core device is already initialized");
 		return 0;
 	}
 	g_crm_core_dev = (struct cam_req_mgr_core_device *)
@@ -2058,7 +2085,7 @@
 	if (!g_crm_core_dev)
 		return -ENOMEM;
 
-	CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
+	CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
 	INIT_LIST_HEAD(&g_crm_core_dev->session_head);
 	mutex_init(&g_crm_core_dev->crm_lock);
 	cam_req_mgr_debug_register(g_crm_core_dev);
@@ -2069,11 +2096,11 @@
 int cam_req_mgr_core_device_deinit(void)
 {
 	if (!g_crm_core_dev) {
-		CRM_ERR("NULL pointer");
+		CAM_ERR(CAM_CRM, "NULL pointer");
 		return -EINVAL;
 	}
 
-	CRM_DBG("g_crm_core_dev %pK", g_crm_core_dev);
+	CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
 	mutex_destroy(&g_crm_core_dev->crm_lock);
 	kfree(g_crm_core_dev);
 	g_crm_core_dev = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
index 2a831e8..f61c41e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -25,30 +25,5 @@
 
 #define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
 
-#if (CRM_TRACE_ENABLE == 1)
-	#define CRM_DBG(fmt, args...) do { \
-	trace_printk("%d: [crm_dbg] "fmt"\n", __LINE__, ##args); \
-	pr_debug("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
-	} while (0)
-
-	#define CRM_WARN(fmt, args...) do { \
-	trace_printk("%d: [crm_warn] "fmt"\n", __LINE__, ##args); \
-	pr_warn("%s:%d "fmt"\n", __func__, __LINE__, ##args); \
-	} while (0)
-
-	#define CRM_ERR(fmt, args...) do { \
-	trace_printk("%d: [crm_err] "fmt"\n", __LINE__, ##args); \
-	pr_err("%s:%d "fmt"\n", __func__, __LINE__, ##args);\
-	} while (0)
-#else
-	#define CRM_DBG(fmt, args...) pr_debug("%s:%d "fmt"\n", \
-	__func__, __LINE__, ##args)
-
-	#define CRM_WARN(fmt, args...) pr_warn("%s:%d "fmt"\n", \
-	__func__, __LINE__, ##args)
-
-	#define CRM_ERR(fmt, args...) pr_err("%s:%d "fmt"\n", \
-	__func__, __LINE__, ##args)
-#endif
 #endif
 
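For reference, the conversion pattern the rest of this patch follows is sketched below. This fragment is illustrative only and not part of the diff: the CAM_DBG/CAM_ERR signatures (a module tag such as CAM_CRM as the first argument, trailing newline supplied by the macro) are inferred from the converted call sites above, and crm_log_example() is a hypothetical helper used purely to show the before/after shape.

	#include "cam_req_mgr_core.h"
	#include "cam_debug_util.h"

	static int crm_log_example(struct cam_req_mgr_core_link *link,
		int64_t req_id)
	{
		if (!link) {
			/* was: CRM_ERR("link is NULL"); */
			CAM_ERR(CAM_CRM, "link is NULL");
			return -EINVAL;
		}

		/* was: CRM_DBG("link %x req %lld", link->link_hdl, req_id); */
		CAM_DBG(CAM_CRM, "link %x req %lld", link->link_hdl, req_id);
		return 0;
	}

Call sites keep their format strings and arguments unchanged; only the macro name gains the module tag, which makes the per-file CRM_*/CDBG definitions (such as the block removed above) redundant.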
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index c495088..7a2bc09 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-REQ-MGR %s:%d " fmt, __func__, __LINE__
-
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
@@ -26,6 +24,7 @@
 #include "cam_req_mgr_core.h"
 #include "cam_subdev.h"
 #include "cam_mem_mgr.h"
+#include "cam_debug_util.h"
 
 #define CAM_REQ_MGR_EVENT_MAX 30
 
@@ -108,7 +107,7 @@
 
 	rc = v4l2_fh_open(filep);
 	if (rc) {
-		pr_err("v4l2_fh_open failed: %d\n", rc);
+		CAM_ERR(CAM_CRM, "v4l2_fh_open failed: %d", rc);
 		goto end;
 	}
 
@@ -120,7 +119,7 @@
 	rc = cam_mem_mgr_init();
 	if (rc) {
 		g_dev.open_cnt--;
-		pr_err("mem mgr init failed\n");
+		CAM_ERR(CAM_CRM, "mem mgr init failed");
 		goto mem_mgr_init_fail;
 	}
 
@@ -507,19 +506,20 @@
 	int rc;
 
 	if (g_dev.state != true) {
-		pr_err("camera root device not ready yet");
+		CAM_ERR(CAM_CRM, "camera root device not ready yet");
 		return -ENODEV;
 	}
 
 	if (!csd || !csd->name) {
-		pr_err("invalid arguments");
+		CAM_ERR(CAM_CRM, "invalid arguments");
 		return -EINVAL;
 	}
 
 	mutex_lock(&g_dev.dev_lock);
 	if ((g_dev.subdev_nodes_created) &&
 		(csd->sd_flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) {
-		pr_err("dynamic node is not allowed, name: %s, type : %d",
+		CAM_ERR(CAM_CRM,
+			"dynamic node is not allowed, name: %s, type :%d",
 			csd->name, csd->ent_function);
 		rc = -EINVAL;
 		goto reg_fail;
@@ -538,7 +538,7 @@
 
 	rc = v4l2_device_register_subdev(g_dev.v4l2_dev, sd);
 	if (rc) {
-		pr_err("register subdev failed");
+		CAM_ERR(CAM_CRM, "register subdev failed");
 		goto reg_fail;
 	}
 	g_dev.count++;
@@ -552,7 +552,7 @@
 int cam_unregister_subdev(struct cam_subdev *csd)
 {
 	if (g_dev.state != true) {
-		pr_err("camera root device not ready yet");
+		CAM_ERR(CAM_CRM, "camera root device not ready yet");
 		return -ENODEV;
 	}
 
@@ -603,19 +603,19 @@
 
 	rc = cam_req_mgr_util_init();
 	if (rc) {
-		pr_err("cam req mgr util init is failed\n");
+		CAM_ERR(CAM_CRM, "cam req mgr util init is failed");
 		goto req_mgr_util_fail;
 	}
 
 	rc = cam_mem_mgr_init();
 	if (rc) {
-		pr_err("mem mgr init failed\n");
+		CAM_ERR(CAM_CRM, "mem mgr init failed");
 		goto mem_mgr_init_fail;
 	}
 
 	rc = cam_req_mgr_core_device_init();
 	if (rc) {
-		pr_err("core device setup failed\n");
+		CAM_ERR(CAM_CRM, "core device setup failed");
 		goto req_mgr_core_fail;
 	}
 
@@ -663,7 +663,7 @@
 		return -EINVAL;
 
 	if (g_dev.state != true) {
-		pr_err("camera root device not ready yet");
+		CAM_ERR(CAM_CRM, "camera root device not ready yet");
 		return -ENODEV;
 	}
 
@@ -675,7 +675,7 @@
 
 	rc = v4l2_device_register_subdev_nodes(g_dev.v4l2_dev);
 	if (rc) {
-		pr_err("failed to register the sub devices");
+		CAM_ERR(CAM_CRM, "failed to register the sub devices");
 		goto create_fail;
 	}
 
@@ -683,7 +683,7 @@
 		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
 			continue;
 		sd->entity.name = video_device_node_name(sd->devnode);
-		pr_debug("created node :%s\n", sd->entity.name);
+		CAM_DBG(CAM_CRM, "created node :%s", sd->entity.name);
 	}
 
 	g_dev.subdev_nodes_created = true;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index 91860f6..8faf35a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -196,9 +196,9 @@
  * @error    : what error device hit while processing this req
  */
 struct cam_req_mgr_error_notify {
-	int32_t link_hdl;
-	int32_t dev_hdl;
-	int64_t req_id;
+	int32_t  link_hdl;
+	int32_t  dev_hdl;
+	uint64_t req_id;
 	enum cam_req_mgr_device_error error;
 };
 
@@ -210,9 +210,9 @@
  *
  */
 struct cam_req_mgr_add_request {
-	int32_t link_hdl;
-	int32_t dev_hdl;
-	int64_t req_id;
+	int32_t  link_hdl;
+	int32_t  dev_hdl;
+	uint64_t req_id;
 };
 
 
@@ -260,7 +260,7 @@
 struct cam_req_mgr_apply_request {
 	int32_t    link_hdl;
 	int32_t    dev_hdl;
-	int64_t    request_id;
+	uint64_t   request_id;
 	int32_t    report_if_bubble;
 };
 
@@ -276,7 +276,7 @@
 	int32_t     link_hdl;
 	int32_t     dev_hdl;
 	uint32_t    type;
-	int64_t     req_id;
+	uint64_t    req_id;
 };
 
 /**
@@ -286,9 +286,9 @@
  *
  */
 struct cam_req_mgr_link_evt_data {
-	int32_t link_hdl;
-	int32_t dev_hdl;
-	int64_t req_id;
+	int32_t  link_hdl;
+	int32_t  dev_hdl;
+	uint64_t req_id;
 
 	enum cam_req_mgr_link_evt_type evt_type;
 	union {
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
index 9da445d..2aa2ab1 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
@@ -11,12 +11,13 @@
  */
 
 #include "cam_req_mgr_timer.h"
+#include "cam_debug_util.h"
 
 void crm_timer_reset(struct cam_req_mgr_timer *crm_timer)
 {
 	if (!crm_timer)
 		return;
-	CRM_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+	CAM_DBG(CAM_CRM, "Starting timer to fire in %d ms. (jiffies=%lu)\n",
 		crm_timer->expires, jiffies);
 	mod_timer(&crm_timer->sys_timer,
 		(jiffies + msecs_to_jiffies(crm_timer->expires)));
@@ -27,17 +28,17 @@
 	struct cam_req_mgr_timer *timer = (struct cam_req_mgr_timer *)data;
 
 	if (!timer) {
-		CRM_ERR("NULL timer");
+		CAM_ERR(CAM_CRM, "NULL timer");
 		return;
 	}
-	CRM_DBG("timer %pK parent %pK", timer, timer->parent);
+	CAM_DBG(CAM_CRM, "timer %pK parent %pK", timer, timer->parent);
 	crm_timer_reset(timer);
 }
 
 void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
 	int32_t expires)
 {
-	CRM_DBG("new time %d", expires);
+	CAM_DBG(CAM_CRM, "new time %d", expires);
 	if (crm_timer) {
 		crm_timer->expires = expires;
 		crm_timer_reset(crm_timer);
@@ -50,7 +51,7 @@
 	int                       ret = 0;
 	struct cam_req_mgr_timer *crm_timer = NULL;
 
-	CRM_DBG("init timer %d %pK", expires, *timer);
+	CAM_DBG(CAM_CRM, "init timer %d %pK", expires, *timer);
 	if (*timer == NULL) {
 		crm_timer = (struct cam_req_mgr_timer *)
 			kzalloc(sizeof(struct cam_req_mgr_timer), GFP_KERNEL);
@@ -71,7 +72,7 @@
 		crm_timer_reset(crm_timer);
 		*timer = crm_timer;
 	} else {
-		CRM_WARN("Timer already exists!!");
+		CAM_WARN(CAM_CRM, "Timer already exists!!");
 		ret = -EINVAL;
 	}
 end:
@@ -79,7 +80,7 @@
 }
 void crm_timer_exit(struct cam_req_mgr_timer **crm_timer)
 {
-	CRM_DBG("destroy timer %pK", *crm_timer);
+	CAM_DBG(CAM_CRM, "destroy timer %pK", *crm_timer);
 	if (*crm_timer) {
 		del_timer(&(*crm_timer)->sys_timer);
 		kfree(*crm_timer);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index 38048d5..a9134fb 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -20,12 +20,7 @@
 #include <linux/random.h>
 #include <media/cam_req_mgr.h>
 #include "cam_req_mgr_util.h"
-
-#ifdef CONFIG_CAM_REQ_MGR_UTIL_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
+#include "cam_debug_util.h"
 
 static struct cam_req_mgr_util_hdl_tbl *hdl_tbl;
 static DEFINE_SPINLOCK(hdl_tbl_lock);
@@ -38,7 +33,7 @@
 
 	if (hdl_tbl) {
 		rc = -EINVAL;
-		pr_err("Hdl_tbl is already present\n");
+		CAM_ERR(CAM_CRM, "Hdl_tbl is already present");
 		goto hdl_tbl_check_failed;
 	}
 
@@ -79,7 +74,7 @@
 {
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		pr_err("Hdl tbl is NULL\n");
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
 		spin_unlock_bh(&hdl_tbl_lock);
 		return -EINVAL;
 	}
@@ -99,14 +94,14 @@
 
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		pr_err("Hdl tbl is NULL\n");
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
 		spin_unlock_bh(&hdl_tbl_lock);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES; i++) {
 		if (hdl_tbl->hdl[i].state == HDL_ACTIVE) {
-			pr_err("Dev handle = %x session_handle = %x\n",
+			CAM_ERR(CAM_CRM, "Dev handle = %x session_handle = %x",
 				hdl_tbl->hdl[i].hdl_value,
 				hdl_tbl->hdl[i].session_hdl);
 			hdl_tbl->hdl[i].state = HDL_FREE;
@@ -141,14 +136,14 @@
 
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		pr_err("Hdl tbl is NULL\n");
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
 		spin_unlock_bh(&hdl_tbl_lock);
 		return -EINVAL;
 	}
 
 	idx = cam_get_free_handle_index();
 	if (idx < 0) {
-		pr_err("Unable to create session handle\n");
+		CAM_ERR(CAM_CRM, "Unable to create session handle");
 		spin_unlock_bh(&hdl_tbl_lock);
 		return idx;
 	}
@@ -174,14 +169,14 @@
 
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		pr_err("Hdl tbl is NULL\n");
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
 		spin_unlock_bh(&hdl_tbl_lock);
 		return -EINVAL;
 	}
 
 	idx = cam_get_free_handle_index();
 	if (idx < 0) {
-		pr_err("Unable to create device handle\n");
+		CAM_ERR(CAM_CRM, "Unable to create device handle");
 		spin_unlock_bh(&hdl_tbl_lock);
 		return idx;
 	}
@@ -196,7 +191,7 @@
 	hdl_tbl->hdl[idx].ops = hdl_data->ops;
 	spin_unlock_bh(&hdl_tbl_lock);
 
-	pr_debug("%s: handle = %x\n", __func__, handle);
+	pr_debug("%s: handle = %x", __func__, handle);
 	return handle;
 }
 
@@ -208,29 +203,29 @@
 
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		pr_err("Hdl tbl is NULL\n");
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
 		goto device_priv_fail;
 	}
 
 	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
 	if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
-		pr_err("Invalid idx\n");
+		CAM_ERR(CAM_CRM, "Invalid idx");
 		goto device_priv_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
-		pr_err("Invalid state\n");
+		CAM_ERR(CAM_CRM, "Invalid state");
 		goto device_priv_fail;
 	}
 
 	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
 	if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
-		pr_err("Invalid type\n");
+		CAM_ERR(CAM_CRM, "Invalid type");
 		goto device_priv_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
-		pr_err("Invalid hdl\n");
+		CAM_ERR(CAM_CRM, "Invalid hdl");
 		goto device_priv_fail;
 	}
 
@@ -252,29 +247,29 @@
 
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		pr_err("Hdl tbl is NULL\n");
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
 		goto device_ops_fail;
 	}
 
 	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
 	if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
-		pr_err("Invalid idx\n");
+		CAM_ERR(CAM_CRM, "Invalid idx");
 		goto device_ops_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
-		pr_err("Invalid state\n");
+		CAM_ERR(CAM_CRM, "Invalid state");
 		goto device_ops_fail;
 	}
 
 	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
 	if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
-		pr_err("Invalid type\n");
+		CAM_ERR(CAM_CRM, "Invalid type");
 		goto device_ops_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
-		pr_err("Invalid hdl\n");
+		CAM_ERR(CAM_CRM, "Invalid hdl");
 		goto device_ops_fail;
 	}
 
@@ -295,29 +290,29 @@
 
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		pr_err("Hdl tbl is NULL\n");
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
 		goto destroy_hdl_fail;
 	}
 
 	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
 	if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
-		pr_err("Invalid idx\n");
+		CAM_ERR(CAM_CRM, "Invalid idx");
 		goto destroy_hdl_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
-		pr_err("Invalid state\n");
+		CAM_ERR(CAM_CRM, "Invalid state");
 		goto destroy_hdl_fail;
 	}
 
 	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
 	if (type != dev_hdl_type) {
-		pr_err("Invalid type %d, %d\n", type, dev_hdl_type);
+		CAM_ERR(CAM_CRM, "Invalid type %d, %d", type, dev_hdl_type);
 		goto destroy_hdl_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
-		pr_err("Invalid hdl\n");
+		CAM_ERR(CAM_CRM, "Invalid hdl");
 		goto destroy_hdl_fail;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 38dcb42..c48a391 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -11,6 +11,7 @@
  */
 
 #include "cam_req_mgr_workq.h"
+#include "cam_debug_util.h"
 
 #define WORKQ_ACQUIRE_LOCK(workq, flags) {\
 	if ((workq)->in_irq) \
@@ -84,7 +85,7 @@
 	if (task->process_cb)
 		task->process_cb(task->priv, task->payload);
 	else
-		CRM_WARN("FATAL:no task handler registered for workq");
+		CAM_WARN(CAM_CRM, "FATAL:no task handler registered for workq");
 	cam_req_mgr_workq_put_task(task);
 
 	return 0;
@@ -100,7 +101,7 @@
 	struct crm_workq_task         *task, *task_save;
 	int32_t                        i = CRM_TASK_PRIORITY_0;
 	if (!w) {
-		CRM_ERR("NULL task pointer can not schedule");
+		CAM_ERR(CAM_CRM, "NULL task pointer can not schedule");
 		return;
 	}
 	workq = (struct cam_req_mgr_core_workq *)
@@ -113,7 +114,7 @@
 				atomic_sub(1, &workq->task.pending_cnt);
 				cam_req_mgr_process_task(task);
 			}
-			CRM_DBG("processed task %pK free_cnt %d",
+			CAM_DBG(CAM_CRM, "processed task %pK free_cnt %d",
 				task, atomic_read(&workq->task.free_cnt));
 		}
 		i++;
@@ -125,7 +126,7 @@
 	int32_t                 i = CRM_TASK_PRIORITY_0;
 	struct crm_workq_task  *task, *task_save;
 
-	CRM_DBG("pending_cnt %d",
+	CAM_DBG(CAM_CRM, "pending_cnt %d",
 		atomic_read(&workq->task.pending_cnt));
 
 	while (i < CRM_TASK_PRIORITY_MAX) {
@@ -133,7 +134,7 @@
 			list_for_each_entry_safe(task, task_save,
 				&workq->task.process_head[i], entry) {
 				cam_req_mgr_workq_put_task(task);
-				CRM_WARN("flush task %pK, %d, cnt %d",
+				CAM_WARN(CAM_CRM, "flush task %pK, %d, cnt %d",
 					task, i, atomic_read(
 					&workq->task.free_cnt));
 			}
@@ -150,13 +151,13 @@
 	unsigned long flags = 0;
 
 	if (!task) {
-		CRM_WARN("NULL task pointer can not schedule");
+		CAM_WARN(CAM_CRM, "NULL task pointer can not schedule");
 		rc = -EINVAL;
 		goto end;
 	}
 	workq = (struct cam_req_mgr_core_workq *)task->parent;
 	if (!workq) {
-		CRM_DBG("NULL workq pointer suspect mem corruption");
+		CAM_DBG(CAM_CRM, "NULL workq pointer suspect mem corruption");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -167,7 +168,7 @@
 
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
-		CRM_WARN("task aborted and queued back to pool");
+		CAM_WARN(CAM_CRM, "task aborted and queued back to pool");
 		rc = 0;
 		goto end;
 	}
@@ -182,7 +183,7 @@
 	WORKQ_RELEASE_LOCK(workq, flags);
 
 	atomic_add(1, &workq->task.pending_cnt);
-	CRM_DBG("enq task %pK pending_cnt %d",
+	CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
 
 	queue_work(workq->job, &workq->work);
@@ -207,7 +208,7 @@
 			return -ENOMEM;
 
 		strlcat(buf, name, sizeof(buf));
-		CRM_DBG("create workque crm_workq-%s", name);
+		CAM_DBG(CAM_CRM, "create workque crm_workq-%s", name);
 		crm_workq->job = alloc_workqueue(buf,
 			WQ_HIGHPRI | WQ_UNBOUND, 0, NULL);
 		if (!crm_workq->job) {
@@ -218,7 +219,7 @@
 		/* Workq attributes initialization */
 		INIT_WORK(&crm_workq->work, cam_req_mgr_process_workq);
 		spin_lock_init(&crm_workq->lock_bh);
-		CRM_DBG("LOCK_DBG workq %s lock %pK",
+		CAM_DBG(CAM_CRM, "LOCK_DBG workq %s lock %pK",
 			name, &crm_workq->lock_bh);
 
 		/* Task attributes initialization */
@@ -234,7 +235,7 @@
 				crm_workq->task.num_task,
 				GFP_KERNEL);
 		if (!crm_workq->task.pool) {
-			CRM_WARN("Insufficient memory %lu",
+			CAM_WARN(CAM_CRM, "Insufficient memory %lu",
 				sizeof(struct crm_workq_task) *
 				crm_workq->task.num_task);
 			kfree(crm_workq);
@@ -250,7 +251,7 @@
 			cam_req_mgr_workq_put_task(task);
 		}
 		*workq = crm_workq;
-		CRM_DBG("free tasks %d",
+		CAM_DBG(CAM_CRM, "free tasks %d",
 			atomic_read(&crm_workq->task.free_cnt));
 	}
 
@@ -259,7 +260,7 @@
 
 void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
 {
-	CRM_DBG("destroy workque %pK", crm_workq);
+	CAM_DBG(CAM_CRM, "destroy workque %pK", crm_workq);
 	if (*crm_workq) {
 		crm_workq_clear_q(*crm_workq);
 		if ((*crm_workq)->job) {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
index e515a40..b66480c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -4,3 +4,5 @@
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator/
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 37dbf9e..18097b0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -23,7 +23,7 @@
 	struct cam_cmd_i2c_info *i2c_info;
 
 	if (!a_ctrl || !cmd_buf) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -32,8 +32,8 @@
 		i2c_info->i2c_freq_mode;
 	a_ctrl->io_master_info.cci_client->sid =
 		i2c_info->slave_addr >> 1;
-	CDBG("%s:%d Slave addr: 0x%x Freq Mode: %d\n", __func__,
-		__LINE__, i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+	CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x Freq Mode: %d",
+		i2c_info->slave_addr, i2c_info->i2c_freq_mode);
 
 	return rc;
 }
@@ -46,13 +46,12 @@
 	uint32_t i, size;
 
 	if (a_ctrl == NULL || i2c_set == NULL) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
 		return -EINVAL;
 	}
 
 	if (i2c_set->is_settings_valid != 1) {
-		pr_err("%s: %d :Error: Invalid settings\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, " Invalid settings");
 		return -EINVAL;
 	}
 
@@ -62,8 +61,8 @@
 			rc = camera_io_dev_write(&(a_ctrl->io_master_info),
 				&(i2c_list->i2c_settings));
 			if (rc < 0) {
-				pr_err("%s: %d :Error: Failed in Applying i2c write settings\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_ACTUATOR,
+					"Failed in Applying i2c wrt settings");
 				return rc;
 			}
 		} else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) {
@@ -82,8 +81,8 @@
 					i2c_list->i2c_settings.
 						reg_setting[i].delay);
 				if (rc < 0) {
-					pr_err("%s: %d :Error: Failed in Applying i2c poll settings\n",
-						__func__, __LINE__);
+					CAM_ERR(CAM_ACTUATOR,
+						"i2c poll apply setting Fail");
 					return rc;
 				}
 			}
@@ -99,24 +98,21 @@
 	struct cam_actuator_ctrl_t *a_ctrl = NULL;
 
 	if (!apply) {
-		pr_err("%s:%d :Error: Invalid Input Args\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Invalid Input Args");
 		return -EINVAL;
 	}
 
 	a_ctrl = (struct cam_actuator_ctrl_t *)
 		cam_get_device_priv(apply->dev_hdl);
 	if (!a_ctrl) {
-		pr_err("%s: %d :Error: Device data is NULL\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
 		return -EINVAL;
 	}
 	request_id = apply->request_id % MAX_PER_FRAME_ARRAY;
 
 	trace_cam_apply_req("Actuator", apply);
 
-	CDBG("%s:%d Request Id: %lld\n",
-		__func__, __LINE__, apply->request_id);
+	CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id);
 
 	if ((apply->request_id ==
 		a_ctrl->i2c_data.per_frame[request_id].request_id) &&
@@ -125,8 +121,9 @@
 		rc = cam_actuator_apply_settings(a_ctrl,
 			&a_ctrl->i2c_data.per_frame[request_id]);
 		if (rc < 0) {
-			pr_err("%s:%d Failed in applying the request: %lld\n",
-				__func__, __LINE__, apply->request_id);
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed in applying the request: %lld\n",
+				apply->request_id);
 			return rc;
 		}
 	}
@@ -139,12 +136,13 @@
 		a_ctrl->i2c_data.per_frame[del_req_id].request_id = 0;
 		rc = delete_request(&a_ctrl->i2c_data.per_frame[del_req_id]);
 		if (rc < 0) {
-			pr_err("%s: %d :Error: Fail deleting the req: %d err: %d\n",
-				__func__, __LINE__, del_req_id, rc);
+			CAM_ERR(CAM_ACTUATOR,
+				"Fail deleting the req: %d err: %d\n",
+				del_req_id, rc);
 			return rc;
 		}
 	} else {
-		CDBG("%s:%d No Valid Req to clean Up\n", __func__, __LINE__);
+		CAM_DBG(CAM_ACTUATOR, "No Valid Req to clean Up");
 	}
 
 	return rc;
@@ -156,15 +154,14 @@
 	struct cam_actuator_ctrl_t *a_ctrl = NULL;
 
 	if (!link) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
 		return -EINVAL;
 	}
 
 	a_ctrl = (struct cam_actuator_ctrl_t *)
 		cam_get_device_priv(link->dev_hdl);
 	if (!a_ctrl) {
-		pr_err("%s:%d :Error: Device data is NULL\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
 		return -EINVAL;
 	}
 	if (link->link_enable) {
@@ -181,7 +178,7 @@
 int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info)
 {
 	if (!info) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -208,8 +205,7 @@
 	struct cam_req_mgr_add_request add_req;
 
 	if (!a_ctrl || !arg) {
-		pr_err("%s:%d :Error: Invalid Args\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -220,21 +216,21 @@
 	rc = cam_mem_get_cpu_buf(config.packet_handle,
 		(uint64_t *)&generic_ptr, &len_of_buff);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: error in converting command Handle %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_ACTUATOR, "Error in converting command Handle %d",
+			rc);
 		return rc;
 	}
 
 	if (config.offset > len_of_buff) {
-		pr_err("%s: %d offset is out of bounds: offset: %lld len: %zu\n",
-			__func__, __LINE__, config.offset, len_of_buff);
+		CAM_ERR(CAM_ACTUATOR,
+			"offset is out of bounds: offset: %lld len: %zu",
+			config.offset, len_of_buff);
 		return -EINVAL;
 	}
 
 	csl_packet = (struct cam_packet *)(generic_ptr +
 		config.offset);
-	CDBG("%s:%d Pkt opcode: %d\n",
-		__func__, __LINE__, csl_packet->header.op_code);
+	CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code);
 
 	if ((csl_packet->header.op_code & 0xFFFFFF) ==
 			CAM_ACTUATOR_PACKET_OPCODE_INIT) {
@@ -246,24 +242,22 @@
 		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
 
 		if (csl_packet->num_cmd_buf != 2) {
-			pr_err("%s:: %d :Error: cmd Buffers in Init : %d\n",
-				__func__, __LINE__, csl_packet->num_cmd_buf);
+			CAM_ERR(CAM_ACTUATOR, "cmd Buffers in Init : %d",
+				csl_packet->num_cmd_buf);
 			return -EINVAL;
 		}
 
 		rc = cam_mem_get_cpu_buf(cmd_desc[0].mem_handle,
 			(uint64_t *)&generic_ptr, &len_of_buff);
 		if (rc < 0) {
-			pr_err("%s:%d Failed to get cpu buf\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
 			return rc;
 		}
 		cmd_buf = (uint32_t *)generic_ptr;
 		cmd_buf += cmd_desc->offset / sizeof(uint32_t);
 		rc = cam_actuator_slaveInfo_pkt_parser(a_ctrl, cmd_buf);
 		if (rc < 0) {
-			pr_err("%s:%d Failed in parsing the pkt\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Failed in parsing the pkt");
 			return rc;
 		}
 		cmd_buf += (sizeof(struct cam_cmd_i2c_info)/sizeof(uint32_t));
@@ -272,8 +266,8 @@
 		rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
 			&cmd_desc[1], 1);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+				rc);
 			return rc;
 		}
 	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
@@ -293,8 +287,8 @@
 		rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
 			cmd_desc, 1);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+				rc);
 			return rc;
 		}
 	} else if ((csl_packet->header.op_code & 0xFFFFFF) ==
@@ -313,8 +307,8 @@
 		rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings,
 			cmd_desc, 1);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: actuator pkt parsing failed: %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_ACTUATOR, "Actuator pkt parsing failed: %d",
+				rc);
 			return rc;
 		}
 	}
@@ -327,8 +321,8 @@
 		if (a_ctrl->bridge_intf.crm_cb &&
 			a_ctrl->bridge_intf.crm_cb->add_req)
 			a_ctrl->bridge_intf.crm_cb->add_req(&add_req);
-		CDBG("%s: %d Req Id: %lld added to Bridge\n",
-			__func__, __LINE__, add_req.req_id);
+		CAM_DBG(CAM_ACTUATOR, "Req Id: %lld added to Bridge",
+			add_req.req_id);
 	}
 
 	return rc;
@@ -348,8 +342,7 @@
 		return 0;
 
 	if (cnt >= CAM_SOC_MAX_REGULATOR) {
-		pr_err("%s:%d Regulators more than supported %d\n",
-			__func__, __LINE__, cnt);
+		CAM_ERR(CAM_ACTUATOR, "Regulators more than supported %d", cnt);
 		return -EINVAL;
 	}
 
@@ -372,8 +365,7 @@
 
 	rc = cam_actuator_vreg_control(a_ctrl, 1);
 	if (rc < 0) {
-		pr_err("%s:%d Actuator Reg Failed %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_ACTUATOR, "Actuator Reg Failed %d", rc);
 		return rc;
 	}
 
@@ -387,8 +379,7 @@
 		rc = cam_soc_util_enable_platform_resource(&a_ctrl->soc_info,
 			false, 0, false);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Failed in req gpio: %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_ACTUATOR, "Failed in req gpio: %d", rc);
 			return rc;
 		}
 
@@ -412,7 +403,7 @@
 
 	rc = cam_actuator_vreg_control(a_ctrl, 0);
 	if (rc < 0) {
-		pr_err("%s failed %d\n", __func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Failed %d");
 		return rc;
 	}
 
@@ -430,8 +421,8 @@
 		rc |= cam_soc_util_disable_platform_resource(&a_ctrl->soc_info,
 					0, 0);
 		if (rc < 0)
-			pr_err("%s:%d Failed to disable platform resources: %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to disable platform resources: %d", rc);
 	}
 
 	return rc;
@@ -444,13 +435,11 @@
 	struct cam_control *cmd = (struct cam_control *)arg;
 
 	if (!a_ctrl || !cmd) {
-		pr_err("%s: %d :Error: Invalid Args\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, " Invalid Args");
 		return -EINVAL;
 	}
 
-	pr_debug("%s:%d Opcode to Actuator: %d\n",
-		__func__, __LINE__, cmd->op_code);
+	pr_debug("Opcode to Actuator: %d", cmd->op_code);
 
 	mutex_lock(&(a_ctrl->actuator_mutex));
 	switch (cmd->op_code) {
@@ -459,8 +448,7 @@
 		struct cam_create_dev_hdl bridge_params;
 
 		if (a_ctrl->bridge_intf.device_hdl != -1) {
-			pr_err("%s:%d Device is already acquired\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Device is already acquired");
 			rc = -EINVAL;
 			goto release_mutex;
 		}
@@ -468,8 +456,7 @@
 			(void __user *) cmd->handle,
 			sizeof(actuator_acq_dev));
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Failed Copying from user\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Failed Copying from user\n");
 			goto release_mutex;
 		}
 
@@ -485,12 +472,11 @@
 		a_ctrl->bridge_intf.session_hdl =
 			actuator_acq_dev.session_handle;
 
-		CDBG("%s:%d Device Handle: %d\n",
-			__func__, __LINE__, actuator_acq_dev.device_handle);
+		CAM_DBG(CAM_ACTUATOR, "Device Handle: %d",
+			actuator_acq_dev.device_handle);
 		if (copy_to_user((void __user *) cmd->handle, &actuator_acq_dev,
 			sizeof(struct cam_sensor_acquire_dev))) {
-			pr_err("%s:%d :Error: Failed Copy to User\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
 			rc = -EFAULT;
 			goto release_mutex;
 		}
@@ -499,8 +485,7 @@
 		break;
 	case CAM_RELEASE_DEV: {
 		if (a_ctrl->bridge_intf.device_hdl == -1) {
-			pr_err("%s:%d :Error: link hdl: %d device hdl: %d\n",
-				__func__, __LINE__,
+			CAM_ERR(CAM_ACTUATOR, "link hdl: %d device hdl: %d",
 				a_ctrl->bridge_intf.device_hdl,
 				a_ctrl->bridge_intf.link_hdl);
 			rc = -EINVAL;
@@ -508,21 +493,19 @@
 		}
 		rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
 		if (rc < 0)
-			pr_err("%s:%d :Error: destroying the device hdl\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "destroying the device hdl");
 		a_ctrl->bridge_intf.device_hdl = -1;
 		a_ctrl->bridge_intf.link_hdl = -1;
 		a_ctrl->bridge_intf.session_hdl = -1;
 	}
 		break;
 	case CAM_QUERY_CAP: {
-		struct cam_actuator_query_cap actuator_cap;
+		struct cam_actuator_query_cap actuator_cap = {0};
 
 		actuator_cap.slot_info = a_ctrl->id;
 		if (copy_to_user((void __user *) cmd->handle, &actuator_cap,
 			sizeof(struct cam_actuator_query_cap))) {
-			pr_err("%s:%d :Error: Failed Copy to User\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
 			rc = -EFAULT;
 			goto release_mutex;
 		}
@@ -531,28 +514,25 @@
 	case CAM_START_DEV: {
 		rc = cam_actuator_power_up(a_ctrl);
 		if (rc < 0) {
-			pr_err("%s: %d :Error: Actuator Power up failed\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, " Actuator Power up failed");
 			goto release_mutex;
 		}
 		rc = camera_io_init(&a_ctrl->io_master_info);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: cci_init failed\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "cci_init failed");
 			cam_actuator_power_down(a_ctrl);
 		}
 
 		rc = cam_actuator_apply_settings(a_ctrl,
 			&a_ctrl->i2c_data.init_settings);
 		if (rc < 0)
-			pr_err("%s: %d :Error: Cannot apply Init settings\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
 
 		/* Delete the request even if the apply is failed */
 		rc = delete_request(&a_ctrl->i2c_data.init_settings);
 		if (rc < 0) {
-			pr_err("%s:%d Fail in deleting the Init settings\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR,
+				"Fail in deleting the Init settings");
 			rc = -EINVAL;
 			goto release_mutex;
 		}
@@ -561,12 +541,10 @@
 	case CAM_STOP_DEV: {
 		rc = camera_io_release(&a_ctrl->io_master_info);
 		if (rc < 0)
-			pr_err("%s:%d :Error: Failed in releasing CCI\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Failed in releasing CCI");
 		rc = cam_actuator_power_down(a_ctrl);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Actuator Power down failed\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
 			goto release_mutex;
 		}
 	}
@@ -576,8 +554,7 @@
 			ACT_APPLY_SETTINGS_LATER;
 		rc = cam_actuator_i2c_pkt_parse(a_ctrl, arg);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Failed in actuator Parsing\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR, "Failed in actuator Parsing");
 		}
 
 		if (a_ctrl->act_apply_state ==
@@ -585,14 +562,15 @@
 			rc = cam_actuator_apply_settings(a_ctrl,
 				&a_ctrl->i2c_data.init_settings);
 			if (rc < 0)
-				pr_err("%s:%d :Error: Cannot apply Update settings\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_ACTUATOR,
+					"Cannot apply Update settings");
 
 			/* Delete the request even if the apply is failed */
 			rc = delete_request(&a_ctrl->i2c_data.init_settings);
 			if (rc < 0) {
-				pr_err("%s: %d :Error: Failed in Deleting the Init Pkt: %d\n",
-					__func__, __LINE__, rc);
+				CAM_ERR(CAM_ACTUATOR,
+					"Failed in Deleting the Init Pkt: %d",
+					rc);
 				goto release_mutex;
 			}
 		}
@@ -601,8 +579,7 @@
 	case CAM_SD_SHUTDOWN:
 		break;
 	default:
-		pr_err("%s:%d Invalid Opcode %d\n",
-			__func__, __LINE__, cmd->op_code);
+		CAM_ERR(CAM_ACTUATOR, "Invalid Opcode %d", cmd->op_code);
 	}
 
 release_mutex:
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index f8ea769..7eba9d1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -28,8 +28,7 @@
 		rc = cam_actuator_driver_cmd(a_ctrl, arg);
 		break;
 	default:
-		pr_err("%s:%d Invalid ioctl cmd\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Invalid ioctl cmd");
 		rc = -EINVAL;
 		break;
 	}
@@ -43,14 +42,14 @@
 	struct cam_actuator_ctrl_t *a_ctrl;
 
 	if (client == NULL || id == NULL) {
-		pr_err("%s:%d: :Error: Invalid Args client: %pK id: %pK\n",
-			__func__, __LINE__, client, id);
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK id: %pK",
+			client, id);
 		return -EINVAL;
 	}
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		pr_err("%s %s :Error: i2c_check_functionality failed\n",
-			__func__, client->name);
+		CAM_ERR(CAM_ACTUATOR, "%s :: i2c_check_functionality failed",
+			 client->name);
 		rc = -EFAULT;
 		return rc;
 	}
@@ -81,7 +80,7 @@
 
 	rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
 	if (rc < 0) {
-		pr_err("failed: cam_sensor_parse_dt rc %d", rc);
+		CAM_ERR(CAM_ACTUATOR, "failed: cam_sensor_parse_dt rc %d", rc);
 		goto free_mem;
 	}
 
@@ -100,7 +99,7 @@
 
 	a_ctrl = platform_get_drvdata(pdev);
 	if (!a_ctrl) {
-		pr_err("%s: Actuator device is NULL\n", __func__);
+		CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
 		return 0;
 	}
 
@@ -120,7 +119,7 @@
 
 	/* Handle I2C Devices */
 	if (!a_ctrl) {
-		pr_err("%s: Actuator device is NULL\n", __func__);
+		CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
 		return -EINVAL;
 	}
 	/*Free Allocated Mem */
@@ -139,7 +138,8 @@
 
 	if (copy_from_user(&cmd_data, (void __user *)arg,
 		sizeof(cmd_data))) {
-		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+		CAM_ERR(CAM_ACTUATOR,
+			"Failed to copy from user_ptr=%pK size=%zu\n",
 			(void __user *)arg, sizeof(cmd_data));
 		return -EFAULT;
 	}
@@ -149,21 +149,21 @@
 		cmd = VIDIOC_CAM_CONTROL;
 		rc = cam_actuator_subdev_ioctl(sd, cmd, &cmd_data);
 		if (rc < 0) {
-			pr_err("%s:%d Failed in actuator suddev handling",
-				__func__, __LINE__);
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed in actuator suddev handling");
 			return rc;
 		}
 		break;
 	default:
-		pr_err("%s:%d Invalid compat ioctl: %d\n",
-			__func__, __LINE__, cmd);
+		CAM_ERR(CAM_ACTUATOR, "Invalid compat ioctl: %d", cmd);
 		rc = -EINVAL;
 	}
 
 	if (!rc) {
 		if (copy_to_user((void __user *)arg, &cmd_data,
 			sizeof(cmd_data))) {
-			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to copy to user_ptr=%pK size=%zu\n",
 				(void __user *)arg, sizeof(cmd_data));
 			rc = -EFAULT;
 		}
@@ -228,8 +228,7 @@
 
 	rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev));
 	if (rc < 0) {
-		pr_err("%s:%d :Error: Paring actuator dt failed rc %d",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_ACTUATOR, "Paring actuator dt failed rc %d", rc);
 		goto free_ctrl;
 	}
 
@@ -252,16 +251,15 @@
 
 	rc = cam_register_subdev(&(a_ctrl->v4l2_dev_str));
 	if (rc < 0) {
-		pr_err("%s:%d :ERROR: Fail with cam_register_subdev\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "Fail with cam_register_subdev");
 		goto free_mem;
 	}
 
 	rc = cam_soc_util_request_platform_resource(&a_ctrl->soc_info,
 			NULL, NULL);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: Requesting Platform Resources failed rc %d",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_ACTUATOR,
+			"Requesting Platform Resources failed rc %d", rc);
 		goto free_ctrl;
 	}
 
@@ -316,14 +314,13 @@
 
 	rc = platform_driver_register(&cam_actuator_platform_driver);
 	if (rc < 0) {
-		pr_err("%s platform_driver_register failed rc = %d",
-			__func__, rc);
+		CAM_ERR(CAM_ACTUATOR,
+			"platform_driver_register failed rc = %d", rc);
 		return rc;
 	}
 	rc = i2c_add_driver(&cam_actuator_driver_i2c);
 	if (rc)
-		pr_err("%s:%d :Error: i2c_add_driver failed rc = %d",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_ACTUATOR, "i2c_add_driver failed rc = %d", rc);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index 19fe4af..fdf881f3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -33,6 +33,7 @@
 #include <cam_subdev.h>
 #include "cam_sensor_util.h"
 #include "cam_soc_util.h"
+#include "cam_debug_util.h"
 
 #define NUM_MASTERS 2
 #define NUM_QUEUES 2
@@ -40,13 +41,6 @@
 #define TRUE  1
 #define FALSE 0
 
-#undef CDBG
-#ifdef CAM_SENSOR_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
 #define ACTUATOR_DRIVER_I2C "i2c_actuator"
 #define CAMX_ACTUATOR_DEV_NAME "cam-actuator-driver"
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
index 584e4d2..ddc89a8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -28,8 +28,7 @@
 	struct platform_device *pdev = NULL;
 
 	if (!soc_info->pdev) {
-		pr_err("%s:%d :Error:soc_info is not initialized\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "soc_info is not initialized");
 		return -EINVAL;
 	}
 
@@ -41,27 +40,27 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: parsing common soc dt(rc %d)\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_ACTUATOR, "parsing common soc dt(rc %d)", rc);
 		return rc;
 	}
 	rc = of_property_read_u32(of_node, "cci-master",
 		&(a_ctrl->cci_i2c_master));
-	CDBG("cci-master %d, rc %d\n", a_ctrl->cci_i2c_master, rc);
+	CAM_DBG(CAM_ACTUATOR, "cci-master %d, rc %d",
+		a_ctrl->cci_i2c_master, rc);
 	if (rc < 0 || a_ctrl->cci_i2c_master >= MASTER_MAX) {
-		pr_err("%s:%d :Error: Wrong info from dt CCI master as : %d\n",
-			__func__, __LINE__, a_ctrl->cci_i2c_master);
+		CAM_ERR(CAM_ACTUATOR, "Wrong info from dt CCI master as : %d",
+			a_ctrl->cci_i2c_master);
 		return rc;
 	}
 
 	if (!soc_info->gpio_data) {
-		pr_info("%s:%d No GPIO found\n", __func__, __LINE__);
+		CAM_INFO(CAM_ACTUATOR, "No GPIO found");
 		rc = 0;
 		return rc;
 	}
 
 	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
-		pr_info("%s:%d No GPIO found\n", __func__, __LINE__);
+		CAM_INFO(CAM_ACTUATOR, "No GPIO found");
 		return -EINVAL;
 	}
 
@@ -69,8 +68,7 @@
 		&a_ctrl->gpio_num_info);
 
 	if ((rc < 0) || (!a_ctrl->gpio_num_info)) {
-		pr_err("%s:%d No/Error Actuator GPIOs\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_ACTUATOR, "No/Error Actuator GPIOs");
 		return -EINVAL;
 	}
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
index 83e0c19..c69eeaa 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -33,7 +33,7 @@
 		num_bytes = 4;
 		break;
 	default:
-		pr_err("%s: %d failed: %d\n", __func__, __LINE__, type);
+		CAM_ERR(CAM_CCI, "failed: %d", type);
 		num_bytes = 0;
 		break;
 	}
@@ -52,9 +52,9 @@
 	rc = wait_for_completion_timeout(
 		&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
 	if (rc < 0) {
-		pr_err("%s:%d wait failed\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "wait failed");
 	} else if (rc == 0) {
-		pr_err("%s:%d wait timeout\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "wait timeout");
 
 		/* Set reset pending flag to TRUE */
 		cci_dev->cci_master_info[master].reset_pending = TRUE;
@@ -72,8 +72,7 @@
 			&cci_dev->cci_master_info[master].reset_complete,
 			CCI_TIMEOUT);
 		if (rc <= 0)
-			pr_err("%s:%d wait failed %d\n", __func__, __LINE__,
-				rc);
+			CAM_ERR(CAM_CCI, "wait failed %d", rc);
 	}
 }
 
@@ -91,37 +90,36 @@
 
 	read_val = cam_io_r_mb(base +
 		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
-	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
-		__func__, __LINE__, read_val, len,
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d",
+		read_val, len,
 		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
 	if ((read_val + len + 1) > cci_dev->
 		cci_i2c_queue_info[master][queue].max_queue_size) {
 		uint32_t reg_val = 0;
 		uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
 
-		CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+		CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD");
 		cam_io_w_mb(report_val,
 			base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 			reg_offset);
 		read_val++;
-		CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d\n",
-			__func__, __LINE__, read_val, queue);
+		CAM_DBG(CAM_CCI,
+			"CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d",
+			read_val, queue);
 		cam_io_w_mb(read_val, base +
 			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
 		reg_val = 1 << ((master * 2) + queue);
-		CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+		CAM_DBG(CAM_CCI, "CCI_QUEUE_START_ADDR");
 		atomic_set(&cci_dev->cci_master_info[master].
 						done_pending[queue], 1);
 		cam_io_w_mb(reg_val, base +
 			CCI_QUEUE_START_ADDR);
-		CDBG("%s line %d wait_for_completion_timeout\n",
-			__func__, __LINE__);
+		CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
 		atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
 		rc = wait_for_completion_timeout(&cci_dev->
 			cci_master_info[master].report_q[queue], CCI_TIMEOUT);
 		if (rc <= 0) {
-			pr_err("%s: wait_for_completion_timeout %d\n",
-				 __func__, __LINE__);
+			CAM_ERR(CAM_CCI, "wait_for_completion_timeout rc: %d", rc);
 			if (rc == 0)
 				rc = -ETIMEDOUT;
 			cam_cci_flush_queue(cci_dev, master);
@@ -129,7 +127,7 @@
 		}
 		rc = cci_dev->cci_master_info[master].status;
 		if (rc < 0)
-			pr_err("%s failed rc %d\n", __func__, rc);
+			CAM_ERR(CAM_CCI, "Failed rc %d", rc);
 	}
 
 	return rc;
@@ -147,17 +145,17 @@
 	void __iomem *base = soc_info->reg_map[0].mem_base;
 
 	if (!cci_dev) {
-		pr_err("%s: failed %d", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "failed: cci_dev is NULL");
 		return -EINVAL;
 	}
 
 	rc = cam_cci_validate_queue(cci_dev, 1, master, queue);
 	if (rc < 0) {
-		pr_err("%s: failed %d", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "Failed rc: %d", rc);
 		return rc;
 	}
-	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
-		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+		CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset, val);
 	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset);
@@ -186,45 +184,43 @@
 	uint32_t reg_offset = 0;
 
 	/* CCI Top Registers */
-	CCI_DBG(" **** %s : %d CCI TOP Registers ****\n", __func__, __LINE__);
+	CCI_DBG(" **** CCI TOP Registers ****");
 	for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) {
 		reg_offset = DEBUG_TOP_REG_START + i * 4;
 		read_val = cam_io_r_mb(cci_dev->base + reg_offset);
-		CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
-			__func__, __LINE__, reg_offset, read_val);
+		CCI_DBG("offset = 0x%X value = 0x%X",
+			reg_offset, read_val);
 	}
 
 	/* CCI Master registers */
-	CCI_DBG(" **** %s : %d CCI MASTER%d Registers ****\n",
-		__func__, __LINE__, master);
+	CCI_DBG(" **** CCI MASTER%d Registers ****",
+		master);
 	for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
 		if (i == 6)
 			continue;
 		reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
 		read_val = cam_io_r_mb(cci_dev->base + reg_offset);
-		CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
-			__func__, __LINE__, reg_offset, read_val);
+		CCI_DBG("offset = 0x%X value = 0x%X", reg_offset, read_val);
 	}
 
 	/* CCI Master Queue registers */
-	CCI_DBG(" **** %s : %d CCI MASTER%d QUEUE%d Registers ****\n",
-		__func__, __LINE__, master, queue);
+	CCI_DBG(" **** CCI MASTER%d QUEUE%d Registers ****",
+		master, queue);
 	for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) {
 		reg_offset = DEBUG_MASTER_QUEUE_REG_START +  master*0x200 +
 			queue*0x100 + i * 4;
 		read_val = cam_io_r_mb(cci_dev->base + reg_offset);
-		CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
-			__func__, __LINE__, reg_offset, read_val);
+		CCI_DBG("offset = 0x%X value = 0x%X",
+			reg_offset, read_val);
 	}
 
 	/* CCI Interrupt registers */
-	CCI_DBG(" **** %s : %d CCI Interrupt Registers ****\n",
-		__func__, __LINE__);
+	CCI_DBG(" **** CCI Interrupt Registers ****");
 	for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) {
 		reg_offset = DEBUG_INTR_REG_START + i * 4;
 		read_val = cam_io_r_mb(cci_dev->base + reg_offset);
-		CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
-			__func__, __LINE__, reg_offset, read_val);
+		CCI_DBG("offset = 0x%X value = 0x%X",
+			reg_offset, read_val);
 	}
 }
 #endif
@@ -236,21 +232,19 @@
 	int32_t rc = 0;
 
 	if (!cci_dev) {
-		pr_err("%s: failed %d", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "failed");
 		return -EINVAL;
 	}
 
 	rc = wait_for_completion_timeout(&cci_dev->
 		cci_master_info[master].report_q[queue], CCI_TIMEOUT);
-	CDBG("%s line %d wait DONE_for_completion_timeout\n",
-		__func__, __LINE__);
+	CAM_DBG(CAM_CCI, "wait DONE_for_completion_timeout");
 
 	if (rc <= 0) {
 #ifdef DUMP_CCI_REGISTERS
 		cam_cci_dump_registers(cci_dev, master, queue);
 #endif
-		pr_err("%s: %d wait for queue: %d\n",
-			 __func__, __LINE__, queue);
+		CAM_ERR(CAM_CCI, "wait for queue: %d", queue);
 		if (rc == 0)
 			rc = -ETIMEDOUT;
 		cam_cci_flush_queue(cci_dev, master);
@@ -258,7 +252,7 @@
 	}
 	rc = cci_dev->cci_master_info[master].status;
 	if (rc < 0) {
-		pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "failed rc %d", rc);
 		return rc;
 	}
 
@@ -278,15 +272,13 @@
 		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
 	uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
 
-	CDBG("%s:%d CCI_I2C_REPORT_CMD curr_w_cnt: %d\n",
-		__func__, __LINE__, read_val);
+	CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD curr_w_cnt: %d", read_val);
 	cam_io_w_mb(report_val,
 		base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset);
 	read_val++;
 
-	CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
-		__func__, __LINE__, read_val);
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d", read_val);
 	cam_io_w_mb(read_val, base +
 		CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
 }
@@ -319,12 +311,12 @@
 	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
 		rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
 		if (rc < 0) {
-			pr_err("%s failed line %d\n", __func__, __LINE__);
+			CAM_ERR(CAM_CCI, "failed rc: %d", rc);
 			return rc;
 		}
 		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
 		if (rc < 0) {
-			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
 			return rc;
 		}
 	} else {
@@ -332,17 +324,17 @@
 						done_pending[queue], 1);
 		rc = cam_cci_wait(cci_dev, master, queue);
 		if (rc < 0) {
-			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
 			return rc;
 		}
 		rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
 		if (rc < 0) {
-			pr_err("%s failed line %d\n", __func__, __LINE__);
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
 			return rc;
 		}
 		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
 		if (rc < 0) {
-			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_CCI, "Failed rc %d", rc);
 			return rc;
 		}
 	}
@@ -362,8 +354,7 @@
 
 	read_val = cam_io_r_mb(base +
 		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
-	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n",
-		__func__, __LINE__, read_val,
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d", read_val,
 		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
 	return (cci_dev->
 		cci_i2c_queue_info[master][queue].max_queue_size) -
@@ -398,13 +389,13 @@
 						done_pending[queue], 1);
 		rc = cam_cci_wait(cci_dev, master, queue);
 		if (rc < 0) {
-			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
 			return rc;
 		}
 	} else {
 		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
 		if (rc < 0) {
-			pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
 			return rc;
 		}
 	}
@@ -425,7 +416,7 @@
 	uint32_t size = cmd_size;
 
 	if (!cci_dev || !c_ctrl) {
-		pr_err("%s: failed %d", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "failed");
 		return -EINVAL;
 	}
 
@@ -458,8 +449,8 @@
 	}
 
 	if (len > cci_dev->payload_size) {
-		pr_err("%s: %d Len error: %d",
-			__func__, __LINE__, len);
+		CAM_ERR(CAM_CCI, "Len error: %d",
+			len);
 		return -EINVAL;
 	}
 
@@ -476,8 +467,8 @@
 	if (clk) {
 		cycles_per_us = ((clk/1000)*256)/1000;
 	} else {
-		pr_err("%s:%d, failed: Can use default: %d",
-			__func__, __LINE__, CYCLES_PER_MICRO_SEC_DEFAULT);
+		CAM_ERR(CAM_CCI, "failed: Can use default: %d",
+			CYCLES_PER_MICRO_SEC_DEFAULT);
 		cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
 	}
 
@@ -498,8 +489,8 @@
 
 	if (i2c_freq_mode >= I2C_MAX_MODES ||
 		i2c_freq_mode < I2C_STANDARD_MODE) {
-		pr_err("%s:%d Invalid frequency mode: %d\n",
-			__func__, __LINE__, (int32_t)i2c_freq_mode);
+		CAM_ERR(CAM_CCI, "Invalid frequency mode: %d",
+			(int32_t)i2c_freq_mode);
 		cci_dev->clk_level_index = -1;
 		return;
 	}
@@ -544,8 +535,7 @@
 	void __iomem *base = soc_info->reg_map[0].mem_base;
 
 	if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
-		pr_err("%s:%d invalid i2c_freq_mode = %d",
-			__func__, __LINE__, i2c_freq_mode);
+		CAM_ERR(CAM_CCI, "invalid i2c_freq_mode = %d", i2c_freq_mode);
 		return -EINVAL;
 	}
 
@@ -608,28 +598,27 @@
 	void __iomem *base = soc_info->reg_map[0].mem_base;
 
 	if (i2c_cmd == NULL) {
-		pr_err("%s:%d Failed line\n", __func__,
-			__LINE__);
+		CAM_ERR(CAM_CCI, "Failed: i2c cmd is NULL");
 		return -EINVAL;
 	}
 
 	if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
-		pr_err("%s:%d failed: invalid cmd_size %d\n",
-			__func__, __LINE__, cmd_size);
+		CAM_ERR(CAM_CCI, "failed: invalid cmd_size %d",
+			cmd_size);
 		return -EINVAL;
 	}
 
-	CDBG("%s addr type %d data type %d cmd_size %d\n", __func__,
+	CAM_DBG(CAM_CCI, "addr type %d data type %d cmd_size %d",
 		i2c_msg->addr_type, i2c_msg->data_type, cmd_size);
 
 	if (i2c_msg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
-		pr_err("%s:%d failed: invalid addr_type 0x%X\n",
-			__func__, __LINE__, i2c_msg->addr_type);
+		CAM_ERR(CAM_CCI, "failed: invalid addr_type 0x%X",
+			i2c_msg->addr_type);
 		return -EINVAL;
 	}
 	if (i2c_msg->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
-		pr_err("%s:%d failed: invalid data_type 0x%X\n",
-			__func__, __LINE__, i2c_msg->data_type);
+		CAM_ERR(CAM_CCI, "failed: invalid data_type 0x%X",
+			i2c_msg->data_type);
 		return -EINVAL;
 	}
 	reg_offset = master * 0x200 + queue * 0x100;
@@ -643,8 +632,8 @@
 		c_ctrl->cci_info->retries << 16 |
 		c_ctrl->cci_info->id_map << 18;
 
-	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
-		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+		CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset, val);
 	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 		reg_offset);
@@ -671,7 +660,7 @@
 
 	rc = cam_cci_lock_queue(cci_dev, master, queue, 1);
 	if (rc < 0) {
-		pr_err("%s failed line %d\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "lock queue failed rc: %d", rc);
 		return rc;
 	}
 
@@ -681,22 +670,21 @@
 		len = cam_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
 			i2c_cmd, &pack);
 		if (len <= 0) {
-			pr_err("%s failed line %d\n", __func__, __LINE__);
+			CAM_ERR(CAM_CCI, "failed: invalid cmd len %d", len);
 			return -EINVAL;
 		}
 
 		read_val = cam_io_r_mb(base +
 			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
-		CDBG("%s line %d CUR_WORD_CNT_ADDR %d len %d max %d\n",
-			__func__, __LINE__, read_val, len, max_queue_size);
+		CAM_DBG(CAM_CCI, "CUR_WORD_CNT_ADDR %d len %d max %d",
+			read_val, len, max_queue_size);
 		/* + 1 - space alocation for Report CMD */
 		if ((read_val + len + 1) > queue_size) {
 			if ((read_val + len + 1) > max_queue_size) {
 				rc = cam_cci_process_full_q(cci_dev,
 					master, queue);
 				if (rc < 0) {
-					pr_err("%s failed line %d\n",
-						__func__, __LINE__);
+					CAM_ERR(CAM_CCI, "failed rc: %d", rc);
 					return rc;
 				}
 				continue;
@@ -704,7 +692,7 @@
 			cam_cci_process_half_q(cci_dev, master, queue);
 		}
 
-		CDBG("%s cmd_size %d addr 0x%x data 0x%x\n", __func__,
+		CAM_DBG(CAM_CCI, "cmd_size %d addr 0x%x data 0x%x",
 			cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
 		delay = i2c_cmd->delay;
 		i = 0;
@@ -772,8 +760,9 @@
 			cmd = 0;
 			for (j = 0; (j < 4 && k < i); j++)
 				cmd |= (data[k++] << (j * 8));
-			CDBG("%s LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d\n",
-				__func__, cmd, queue, len, read_val);
+			CAM_DBG(CAM_CCI,
+				"LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d",
+				cmd, queue, len, read_val);
 			cam_io_w_mb(cmd, base +
 				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 				master * 0x200 + queue * 0x100);
@@ -789,8 +778,8 @@
 				0x100);
 			cmd <<= 4;
 			cmd |= CCI_I2C_WAIT_CMD;
-			CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
-				__func__, cmd);
+			CAM_DBG(CAM_CCI,
+				"CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x", cmd);
 			cam_io_w_mb(cmd, base +
 				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
 				master * 0x200 + queue * 0x100);
@@ -802,7 +791,7 @@
 
 	rc = cam_cci_transfer_end(cci_dev, master, queue);
 	if (rc < 0) {
-		pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "failed rc %d", rc);
 		return rc;
 	}
 
@@ -830,7 +819,7 @@
 
 	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
 		|| c_ctrl->cci_info->cci_i2c_master < 0) {
-		pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
 		return -EINVAL;
 	}
 
@@ -847,8 +836,7 @@
 	/* Set the I2C Frequency */
 	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
 	if (rc < 0) {
-		pr_err("%s:%d cam_cci_set_clk_param failed rc = %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
 		goto rel_mutex;
 	}
 
@@ -861,25 +849,22 @@
 		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
 		master, queue);
 	if (rc < 0) {
-		pr_err("%s:%d Initial validataion failed rc %d\n", __func__,
-			__LINE__, rc);
+		CAM_ERR(CAM_CCI, "Initial validation failed rc %d", rc);
 		goto rel_mutex;
 	}
 
 	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
-		pr_err("%s:%d More than max retries\n", __func__,
-			__LINE__);
+		CAM_ERR(CAM_CCI, "More than max retries");
 		goto rel_mutex;
 	}
 
 	if (read_cfg->data == NULL) {
-		pr_err("%s:%d Data ptr is NULL\n", __func__,
-			__LINE__);
+		CAM_ERR(CAM_CCI, "Data ptr is NULL");
 		goto rel_mutex;
 	}
 
-	CDBG("%s master %d, queue %d\n", __func__, master, queue);
-	CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+	CAM_DBG(CAM_CCI, "master %d, queue %d", master, queue);
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
 		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
 		c_ctrl->cci_info->id_map);
 	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
@@ -887,19 +872,20 @@
 		c_ctrl->cci_info->id_map << 18;
 	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
 	if (rc < 0) {
-		CDBG("%s failed line %d\n", __func__, __LINE__);
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
 		goto rel_mutex;
 	}
 
 	val = CCI_I2C_LOCK_CMD;
 	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
 	if (rc < 0) {
-		CDBG("%s failed line %d\n", __func__, __LINE__);
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
 		goto rel_mutex;
 	}
 
 	if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
-		pr_err("%s failed line %d\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+			read_cfg->addr_type);
 		rc = -EINVAL;
 		goto rel_mutex;
 	}
@@ -912,34 +898,33 @@
 
 	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
 	if (rc < 0) {
-		CDBG("%s failed line %d\n", __func__, __LINE__);
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
 		goto rel_mutex;
 	}
 
 	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
 	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
 	if (rc < 0) {
-		CDBG("%s failed line %d\n", __func__, __LINE__);
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
 		goto rel_mutex;
 	}
 
 	val = CCI_I2C_UNLOCK_CMD;
 	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
 	if (rc < 0) {
-		CDBG("%s failed line %d\n", __func__, __LINE__);
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
 		goto rel_mutex;
 	}
 
 	val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
 			+ master * 0x200 + queue * 0x100);
-	CDBG("%s cur word cnt 0x%x\n", __func__, val);
+	CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
 	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
 			+ master * 0x200 + queue * 0x100);
 
 	val = 1 << ((master * 2) + queue);
 	cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
-	CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
-		__LINE__);
+	CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
 
 	rc = wait_for_completion_timeout(&cci_dev->
 		cci_master_info[master].reset_complete, CCI_TIMEOUT);
@@ -949,8 +934,7 @@
 #endif
 		if (rc == 0)
 			rc = -ETIMEDOUT;
-		pr_err("%s: %d wait_for_completion_timeout rc = %d\n",
-			 __func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "wait_for_completion_timeout rc = %d", rc);
 		cam_cci_flush_queue(cci_dev, master);
 		goto rel_mutex;
 	} else {
@@ -961,29 +945,28 @@
 		CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
 	exp_words = ((read_cfg->num_byte / 4) + 1);
 	if (read_words != exp_words) {
-		pr_err("%s:%d read_words = %d, exp words = %d\n", __func__,
-			__LINE__, read_words, exp_words);
+		CAM_ERR(CAM_CCI, "read_words = %d, exp words = %d",
+			read_words, exp_words);
 		memset(read_cfg->data, 0, read_cfg->num_byte);
 		rc = -EINVAL;
 		goto rel_mutex;
 	}
 	index = 0;
-	CDBG("%s index %d num_type %d\n", __func__, index,
-		read_cfg->num_byte);
+	CAM_DBG(CAM_CCI, "index %d num_type %d", index, read_cfg->num_byte);
 	first_byte = 0;
 	do {
 		val = cam_io_r_mb(base +
 			CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
-		CDBG("%s read val 0x%x\n", __func__, val);
+		CAM_DBG(CAM_CCI, "read val 0x%x", val);
 		for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
-			CDBG("%s i %d index %d\n", __func__, i, index);
+			CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
 			if (!first_byte) {
-				CDBG("%s sid 0x%x\n", __func__, val & 0xFF);
+				CAM_DBG(CAM_CCI, "sid 0x%x", val & 0xFF);
 				first_byte++;
 			} else {
 				read_cfg->data[index] =
 					(val  >> (i * 8)) & 0xFF;
-				CDBG("%s data[%d] 0x%x\n", __func__, index,
+				CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
 					read_cfg->data[index]);
 				index++;
 			}
@@ -1006,20 +989,19 @@
 	cci_dev = v4l2_get_subdevdata(sd);
 
 	if (cci_dev->cci_state != CCI_STATE_ENABLED) {
-		pr_err("%s invalid cci state %d\n",
-			__func__, cci_dev->cci_state);
+		CAM_ERR(CAM_CCI, "invalid cci state %d",
+			cci_dev->cci_state);
 		return -EINVAL;
 	}
 	master = c_ctrl->cci_info->cci_i2c_master;
-	CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
 		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
 		c_ctrl->cci_info->id_map);
 
 	/* Set the I2C Frequency */
 	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
 	if (rc < 0) {
-		pr_err("%s:%d cam_cci_set_clk_param failed rc = %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
 		return rc;
 	}
 	/*
@@ -1031,18 +1013,17 @@
 		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1,
 		master, queue);
 	if (rc < 0) {
-		pr_err("%s:%d Initial validataion failed rc %d\n",
-		__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "Initial validation failed rc %d",
+			rc);
 		return rc;
 	}
 	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
-		pr_err("%s:%d More than max retries\n", __func__,
-			__LINE__);
+		CAM_ERR(CAM_CCI, "More than max retries");
 		return rc;
 	}
 	rc = cam_cci_data_queue(cci_dev, c_ctrl, queue, sync_en);
 	if (rc < 0) {
-		pr_err("%s failed line %d\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "failed rc: %d", rc);
 		return rc;
 	}
 
@@ -1069,7 +1050,7 @@
 		&write_async->c_ctrl, write_async->queue, write_async->sync_en);
 	mutex_unlock(&cci_master_info->mutex_q[write_async->queue]);
 	if (rc < 0)
-		pr_err("%s: %d failed\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "failed rc: %d", rc);
 
 	kfree(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting);
 	kfree(write_async);
@@ -1110,7 +1091,7 @@
 		kzalloc(sizeof(struct cam_sensor_i2c_reg_array)*
 		cci_i2c_write_cfg->size, GFP_KERNEL);
 	if (!cci_i2c_write_cfg_w->reg_setting) {
-		pr_err("%s: %d Couldn't allocate memory\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "Couldn't allocate memory");
 		kfree(write_async);
 		return -ENOMEM;
 	}
@@ -1140,35 +1121,33 @@
 	uint16_t read_bytes = 0;
 
 	if (!sd || !c_ctrl) {
-		pr_err("%s:%d sd %pK c_ctrl %pK\n", __func__,
-			__LINE__, sd, c_ctrl);
+		CAM_ERR(CAM_CCI, "sd %pK c_ctrl %pK", sd, c_ctrl);
 		return -EINVAL;
 	}
 	if (!c_ctrl->cci_info) {
-		pr_err("%s:%d cci_info NULL\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "cci_info NULL");
 		return -EINVAL;
 	}
 	cci_dev = v4l2_get_subdevdata(sd);
 	if (!cci_dev) {
-		pr_err("%s:%d cci_dev NULL\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "cci_dev NULL");
 		return -EINVAL;
 	}
 	if (cci_dev->cci_state != CCI_STATE_ENABLED) {
-		pr_err("%s invalid cci state %d\n",
-			__func__, cci_dev->cci_state);
+		CAM_ERR(CAM_CCI, "invalid cci state %d", cci_dev->cci_state);
 		return -EINVAL;
 	}
 
 	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
 			|| c_ctrl->cci_info->cci_i2c_master < 0) {
-		pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
 		return -EINVAL;
 	}
 
 	master = c_ctrl->cci_info->cci_i2c_master;
 	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
 	if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
-		pr_err("%s:%d read num bytes 0\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "read num bytes 0");
 		rc = -EINVAL;
 		goto ERROR;
 	}
@@ -1181,7 +1160,7 @@
 			read_cfg->num_byte = read_bytes;
 		rc = cam_cci_read(sd, c_ctrl);
 		if (rc < 0) {
-			pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
 			goto ERROR;
 		}
 		if (read_bytes > CCI_READ_MAX) {
@@ -1205,8 +1184,8 @@
 
 	cci_dev = v4l2_get_subdevdata(sd);
 	if (!cci_dev || !c_ctrl) {
-		pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
-			__LINE__, cci_dev, c_ctrl);
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			cci_dev, c_ctrl);
 		rc = -EINVAL;
 		return rc;
 	}
@@ -1225,8 +1204,7 @@
 
 	rc = cam_cci_soc_release(cci_dev);
 	if (rc < 0) {
-		pr_err("%s:%d Failed in releasing the cci: %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "Failed in releasing the cci: %d", rc);
 		cam_cpas_stop(cci_dev->cpas_handle);
 		return rc;
 	}
@@ -1246,8 +1224,8 @@
 
 	cci_dev = v4l2_get_subdevdata(sd);
 	if (!cci_dev || !c_ctrl) {
-		pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
-			__LINE__, cci_dev, c_ctrl);
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			cci_dev, c_ctrl);
 		rc = -EINVAL;
 		return rc;
 	}
@@ -1256,7 +1234,7 @@
 
 	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
 		|| c_ctrl->cci_info->cci_i2c_master < 0) {
-		pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
 		return -EINVAL;
 	}
 
@@ -1304,8 +1282,7 @@
 {
 	int32_t rc = 0;
 
-	CDBG("%s line %d cmd %d\n", __func__, __LINE__,
-		cci_ctrl->cmd);
+	CAM_DBG(CAM_CCI, "cmd %d", cci_ctrl->cmd);
 	switch (cci_ctrl->cmd) {
 	case MSM_CCI_INIT:
 		rc = cam_cci_init(sd, cci_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index 63655a4..dad02bf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -37,8 +37,7 @@
 	case VIDIOC_CAM_CONTROL:
 		break;
 	default:
-		pr_err("%s:%d Invalid ioctl cmd: %d\n",
-			__func__, __LINE__, cmd);
+		CAM_ERR(CAM_CCI, "Invalid ioctl cmd: %d", cmd);
 		rc = -ENOIOCTLCMD;
 	}
 
@@ -134,13 +133,13 @@
 			base + CCI_RESET_CMD_ADDR);
 	}
 	if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
-		pr_err("%s:%d MASTER_0 error 0x%x\n", __func__, __LINE__, irq);
+		CAM_ERR(CAM_CCI, "MASTER_0 error 0x%x", irq);
 		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
 		cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
 			base + CCI_HALT_REQ_ADDR);
 	}
 	if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
-		pr_err("%s:%d MASTER_1 error 0x%x\n", __func__, __LINE__, irq);
+		CAM_ERR(CAM_CCI, "MASTER_1 error 0x%x", irq);
 		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
 		cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
 			base + CCI_HALT_REQ_ADDR);
@@ -192,8 +191,7 @@
 
 	rc = cam_cci_parse_dt_info(pdev, new_cci_dev);
 	if (rc < 0) {
-		pr_err("%s: %d Resource get Failed: %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "Resource get Failed: %d", rc);
 		goto cci_no_resource;
 	}
 
@@ -214,8 +212,7 @@
 
 	rc = cam_register_subdev(&(new_cci_dev->v4l2_dev_str));
 	if (rc < 0) {
-		pr_err("%s:%d :Error: Fail with cam_register_subdev\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_CCI, "Fail with cam_register_subdev");
 		goto cci_no_resource;
 	}
 
@@ -230,10 +227,10 @@
 	strlcpy(cpas_parms.identifier, "cci", CAM_HW_IDENTIFIER_LENGTH);
 	rc = cam_cpas_register_client(&cpas_parms);
 	if (rc) {
-		pr_err("%s:%d CPAS registration failed\n", __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "CPAS registration failed");
 		goto cci_no_resource;
 	}
-	CDBG("CPAS registration successful handle=%d\n",
+	CAM_DBG(CAM_CCI, "CPAS registration successful handle=%d",
 		cpas_parms.client_handle);
 	new_cci_dev->cpas_handle = cpas_parms.client_handle;
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
index 6268a1b..cb01c6c 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -37,6 +37,7 @@
 #include <cam_cpas_api.h>
 #include "cam_cci_hwreg.h"
 #include "cam_soc_util.h"
+#include "cam_debug_util.h"
 
 #define V4L2_IDENT_CCI 50005
 #define CCI_I2C_QUEUE_0_SIZE 128
@@ -80,16 +81,6 @@
 #define PRIORITY_QUEUE (QUEUE_0)
 #define SYNC_QUEUE (QUEUE_1)
 
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
-#undef CCI_DBG
-#ifdef MSM_CCI_DEBUG
-#define CCI_DBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CCI_DBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
 enum cci_i2c_sync {
 	MSM_SYNC_DISABLE,
 	MSM_SYNC_ENABLE,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
index d976788..83cb49e3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -27,8 +27,8 @@
 
 	cci_dev = v4l2_get_subdevdata(sd);
 	if (!cci_dev || !c_ctrl) {
-		pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
-			__LINE__, cci_dev, c_ctrl);
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			cci_dev, c_ctrl);
 		rc = -EINVAL;
 		return rc;
 	}
@@ -37,19 +37,18 @@
 	base = soc_info->reg_map[0].mem_base;
 
 	if (!soc_info || !base) {
-		pr_err("%s:%d failed: invalid params %pK %pK\n", __func__,
-			__LINE__, soc_info, base);
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			soc_info, base);
 		rc = -EINVAL;
 		return rc;
 	}
 
-	CDBG("%s:%d Base address %pK\n", __func__, __LINE__, base);
+	CAM_DBG(CAM_CCI, "Base address %pK", base);
 
 	if (cci_dev->ref_count++) {
-		CDBG("%s:%d ref_count %d\n", __func__, __LINE__,
-			cci_dev->ref_count);
+		CAM_DBG(CAM_CCI, "ref_count %d", cci_dev->ref_count);
 		master = c_ctrl->cci_info->cci_i2c_master;
-		CDBG("%s:%d master %d\n", __func__, __LINE__, master);
+		CAM_DBG(CAM_CCI, "master %d", master);
 		if (master < MASTER_MAX && master >= 0) {
 			mutex_lock(&cci_dev->cci_master_info[master].mutex);
 			flush_workqueue(cci_dev->write_wq[master]);
@@ -74,8 +73,7 @@
 				reset_complete,
 				CCI_TIMEOUT);
 			if (rc <= 0)
-				pr_err("%s:%d wait failed %d\n", __func__,
-					__LINE__, rc);
+				CAM_ERR(CAM_CCI, "wait failed %d", rc);
 			mutex_unlock(&cci_dev->cci_master_info[master].mutex);
 		}
 		return 0;
@@ -89,8 +87,7 @@
 	rc = cam_cpas_start(cci_dev->cpas_handle,
 		&ahb_vote, &axi_vote);
 	if (rc != 0) {
-		pr_err("%s:%d CPAS start failed\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_CCI, "CPAS start failed");
 	}
 	cam_cci_get_clk_rates(cci_dev, c_ctrl);
 
@@ -104,15 +101,13 @@
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_TURBO_VOTE, true);
 	if (rc < 0) {
-		CDBG("%s:%d request platform resources failed\n", __func__,
-			__LINE__);
+		CAM_DBG(CAM_CCI, "request platform resources failed");
 		goto platform_enable_failed;
 	}
 
 	cci_dev->hw_version = cam_io_r_mb(base +
 		CCI_HW_VERSION_ADDR);
-	CDBG("%s:%d: hw_version = 0x%x\n", __func__, __LINE__,
-		cci_dev->hw_version);
+	CAM_DBG(CAM_CCI, "hw_version = 0x%x", cci_dev->hw_version);
 
 	cci_dev->payload_size =
 		MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
@@ -129,12 +124,11 @@
 					max_queue_size =
 						CCI_I2C_QUEUE_1_SIZE;
 
-			CDBG("%s:%d : CCI Master[%d] :: Q0 : %d Q1 : %d\n",
-				__func__, __LINE__, i,
+			CAM_DBG(CAM_CCI, "CCI Master[%d] :: Q0 : %d Q1 : %d",
+				i, cci_dev->cci_i2c_queue_info[i][j].
+					max_queue_size,
+				cci_dev->cci_i2c_queue_info[i][j].
+					max_queue_size);
-				cci_dev->cci_i2c_queue_info[i][j].
-				max_queue_size);
+					max_queue_size);
 		}
 	}
 
@@ -146,8 +140,7 @@
 		&cci_dev->cci_master_info[MASTER_0].reset_complete,
 		CCI_TIMEOUT);
 	if (rc <= 0) {
-		pr_err("%s:%d wait_for_completion_timeout\n",
-			 __func__, __LINE__);
+		CAM_ERR(CAM_CCI, "wait_for_completion_timeout");
 		if (rc == 0)
 			rc = -ETIMEDOUT;
 		goto reset_complete_failed;
@@ -162,8 +155,7 @@
 
 	for (i = 0; i < MASTER_MAX; i++) {
 		if (!cci_dev->write_wq[i]) {
-			pr_err("%s:%d Failed to flush write wq\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CCI, "Failed to flush write wq");
 			rc = -ENOMEM;
 			goto reset_complete_failed;
 		} else {
@@ -251,77 +243,75 @@
 				"qcom,i2c_custom_mode");
 
 		rc = of_property_read_u32(src_node, "hw-thigh", &val);
-		CDBG("%s:%d hw-thigh %d, rc %d\n", __func__, __LINE__, val, rc);
+		CAM_DBG(CAM_CCI, "hw-thigh %d, rc %d", val, rc);
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_thigh = val;
 			rc = of_property_read_u32(src_node, "hw-tlow",
 				&val);
-			CDBG("%s:%d hw-tlow %d, rc %d\n", __func__, __LINE__,
+			CAM_DBG(CAM_CCI, "hw-tlow %d, rc %d",
 				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tlow = val;
 			rc = of_property_read_u32(src_node, "hw-tsu-sto",
 				&val);
-			CDBG("%s:%d hw-tsu-sto %d, rc %d\n",
-				__func__, __LINE__, val, rc);
+			CAM_DBG(CAM_CCI, "hw-tsu-sto %d, rc %d",
+				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tsu_sto = val;
 			rc = of_property_read_u32(src_node, "hw-tsu-sta",
 				&val);
-			CDBG("%s:%d hw-tsu-sta %d, rc %d\n",
-				__func__, __LINE__, val, rc);
+			CAM_DBG(CAM_CCI, "hw-tsu-sta %d, rc %d",
+				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tsu_sta = val;
 			rc = of_property_read_u32(src_node, "hw-thd-dat",
 				&val);
-			CDBG("%s:%d hw-thd-dat %d, rc %d\n",
-				__func__, __LINE__, val, rc);
+			CAM_DBG(CAM_CCI, "hw-thd-dat %d, rc %d",
+				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_thd_dat = val;
 			rc = of_property_read_u32(src_node, "hw-thd-sta",
 				&val);
-			CDBG("%s:%d hw-thd-sta %d, rc %d\n", __func__, __LINE__,
+			CAM_DBG(CAM_CCI, "hw-thd-sta %d, rc %d",
 				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_thd_sta = val;
 			rc = of_property_read_u32(src_node, "hw-tbuf",
 				&val);
-			CDBG("%s:%d hw-tbuf %d, rc %d\n", __func__, __LINE__,
+			CAM_DBG(CAM_CCI, "hw-tbuf %d, rc %d",
 				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tbuf = val;
 			rc = of_property_read_u32(src_node,
 				"hw-scl-stretch-en", &val);
-			CDBG("%s:%d hw-scl-stretch-en %d, rc %d\n",
-				__func__, __LINE__, val, rc);
+			CAM_DBG(CAM_CCI, "hw-scl-stretch-en %d, rc %d",
+				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
 			rc = of_property_read_u32(src_node, "hw-trdhld",
 				&val);
-			CDBG("%s:%d hw-trdhld %d, rc %d\n",
-				__func__, __LINE__, val, rc);
+			CAM_DBG(CAM_CCI, "hw-trdhld %d, rc %d",
+				val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_trdhld = val;
 			rc = of_property_read_u32(src_node, "hw-tsp",
 				&val);
-			CDBG("%s:%d hw-tsp %d, rc %d\n", __func__, __LINE__,
-				val, rc);
+			CAM_DBG(CAM_CCI, "hw-tsp %d, rc %d", val, rc);
 		}
 		if (!rc) {
 			cci_dev->cci_clk_params[count].hw_tsp = val;
 			val = 0;
 			rc = of_property_read_u32(src_node, "cci-clk-src",
 				&val);
-			CDBG("%s:%d cci-clk-src %d, rc %d\n",
-				__func__, __LINE__, val, rc);
+			CAM_DBG(CAM_CCI, "cci-clk-src %d, rc %d", val, rc);
 			cci_dev->cci_clk_params[count].cci_clk_src = val;
 		} else
 			cam_cci_init_default_clk_params(cci_dev, count);
@@ -339,8 +329,7 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: Parsing DT data failed:%d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "Parsing DT data failed:%d", rc);
 		return -EINVAL;
 	}
 
@@ -349,8 +338,7 @@
 	rc = cam_soc_util_request_platform_resource(soc_info,
 		cam_cci_irq, new_cci_dev);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: requesting platform resources failed:%d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "requesting platform resources failed:%d", rc);
 		return -EINVAL;
 	}
 	new_cci_dev->v4l2_dev_str.pdev = pdev;
@@ -359,17 +347,15 @@
 
 	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	if (rc)
-		pr_err("%s:%d failed to add child nodes, rc=%d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "failed to add child nodes, rc=%d", rc);
 
 	for (i = 0; i < MASTER_MAX; i++) {
 		new_cci_dev->write_wq[i] = create_singlethread_workqueue(
 			"cam_cci_wq");
 		if (!new_cci_dev->write_wq[i])
-			pr_err("%s:%d Failed to create write wq\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CCI, "Failed to create write wq");
 	}
-	CDBG("%s line %d\n", __func__, __LINE__);
+	CAM_DBG(CAM_CCI, "Exit");
 	return 0;
 }
 
@@ -380,13 +366,12 @@
 		&cci_dev->soc_info;
 
 	if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
-		pr_err("%s:%d invalid ref count %d / cci state %d\n", __func__,
-			__LINE__, cci_dev->ref_count, cci_dev->cci_state);
+		CAM_ERR(CAM_CCI, "invalid ref count %d / cci state %d",
+			cci_dev->ref_count, cci_dev->cci_state);
 		return -EINVAL;
 	}
 	if (--cci_dev->ref_count) {
-		CDBG("%s:%d ref_count Exit %d\n", __func__, __LINE__,
-			cci_dev->ref_count);
+		CAM_DBG(CAM_CCI, "ref_count Exit %d", cci_dev->ref_count);
 		return 0;
 	}
 	for (i = 0; i < MASTER_MAX; i++)
@@ -398,8 +383,8 @@
 
 	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc) {
-		pr_err("%s:%d: platform resources disable failed, rc=%d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CCI, "platform resources disable failed, rc=%d",
+			rc);
 		return rc;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 6b20bcc..fcf76c8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -63,7 +63,7 @@
 	size_t                  len;
 
 	if (!cfg_dev || !csiphy_dev) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -75,16 +75,16 @@
 	rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
 		(uint64_t *)&generic_ptr, &len);
 	if (rc < 0) {
-		pr_err("%s:%d :ERROR: Failed to get packet Mem address: %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
 		kfree(csiphy_dev->csiphy_info);
 		csiphy_dev->csiphy_info = NULL;
 		return rc;
 	}
 
 	if (cfg_dev->offset > len) {
-		pr_err("%s: %d offset is out of bounds: offset: %lld len: %zu\n",
-			__func__, __LINE__, cfg_dev->offset, len);
+		CAM_ERR(CAM_CSIPHY,
+			"offset is out of bounds: offset: %lld len: %zu",
+			cfg_dev->offset, len);
 		kfree(csiphy_dev->csiphy_info);
 		csiphy_dev->csiphy_info = NULL;
 		return -EINVAL;
@@ -99,8 +99,8 @@
 	rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
 		(uint64_t *)&generic_ptr, &len);
 	if (rc < 0) {
-		pr_err("%s:%d :ERROR: Failed to get cmd buf Mem address : %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CSIPHY,
+			"Failed to get cmd buf Mem address : %d", rc);
 		kfree(csiphy_dev->csiphy_info);
 		csiphy_dev->csiphy_info = NULL;
 		return rc;
@@ -158,8 +158,7 @@
 	void __iomem *base = NULL;
 
 	if (!csiphy_dev) {
-		pr_err("%s:%d Invalid Args\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -175,9 +174,9 @@
 			base +
 			csiphy_dev->ctrl_reg->csiphy_reg.
 			mipi_csiphy_interrupt_clear0_addr + 0x4*i);
-		pr_err_ratelimited(
-			"%s CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x\n",
-			__func__, soc_info->index, i, irq);
+		CAM_ERR_RATE_LIMIT(CAM_CSIPHY,
+			"CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x",
+			soc_info->index, i, irq);
 		cam_io_w_mb(0x0,
 			base +
 			csiphy_dev->ctrl_reg->csiphy_reg.
@@ -204,8 +203,7 @@
 	struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
 
 	if (csiphy_dev->csiphy_info == NULL) {
-		pr_err("%s:%d csiphy_info is NULL, No/Fail CONFIG_DEV ?\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "csiphy_info is NULL, No/Fail CONFIG_DEV?");
 		return -EINVAL;
 	}
 
@@ -215,7 +213,7 @@
 	csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
 
 	if (!csiphybase) {
-		pr_err("%s: csiphybase NULL\n", __func__);
+		CAM_ERR(CAM_CSIPHY, "csiphybase NULL");
 		return -EINVAL;
 	}
 
@@ -305,7 +303,7 @@
 					reg_array[lane_pos][i].reg_addr);
 			break;
 			default:
-				CDBG("%s: %d Do Nothing\n", __func__, __LINE__);
+				CAM_DBG(CAM_CSIPHY, "Do Nothing");
 			break;
 			}
 			usleep_range(reg_array[lane_pos][i].delay*1000,
@@ -329,13 +327,11 @@
 	int32_t              rc = 0;
 
 	if (!csiphy_dev || !cmd) {
-		pr_err("%s:%d Invalid input args\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "Invalid input args");
 		return -EINVAL;
 	}
 
-	pr_debug("%s:%d Opcode received: %d\n", __func__, __LINE__,
-		cmd->op_code);
+	CAM_DBG(CAM_CSIPHY, "Opcode received: %d", cmd->op_code);
 	mutex_lock(&csiphy_dev->mutex);
 	switch (cmd->op_code) {
 	case CAM_ACQUIRE_DEV: {
@@ -348,16 +344,15 @@
 			(void __user *)cmd->handle,
 			sizeof(csiphy_acq_dev));
 		if (rc < 0) {
-			pr_err("%s:%d :ERROR: Failed copying from User\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
 			goto release_mutex;
 		}
 
 		csiphy_acq_params.combo_mode = 0;
 
 		if (csiphy_dev->acquire_count == 2) {
-			pr_err("%s:%d CSIPHY device do not allow more than 2 acquires\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY,
+				"CSIPHY device does not allow more than 2 acquires");
 			rc = -EINVAL;
 			goto release_mutex;
 		}
@@ -380,8 +375,7 @@
 		if (copy_to_user((void __user *)cmd->handle,
 				&csiphy_acq_dev,
 				sizeof(struct cam_sensor_acquire_dev))) {
-			pr_err("%s:%d :ERROR: Failed copying from User\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
 			rc = -EINVAL;
 			goto release_mutex;
 		}
@@ -391,13 +385,12 @@
 	}
 		break;
 	case CAM_QUERY_CAP: {
-		struct cam_csiphy_query_cap csiphy_cap;
+		struct cam_csiphy_query_cap csiphy_cap = {0};
 
 		cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
 		if (copy_to_user((void __user *)cmd->handle,
 			&csiphy_cap, sizeof(struct cam_csiphy_query_cap))) {
-			pr_err("%s:%d :ERROR: Failed copying from User\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
 			rc = -EINVAL;
 			goto release_mutex;
 		}
@@ -406,15 +399,13 @@
 	case CAM_STOP_DEV: {
 		rc = cam_csiphy_disable_hw(csiphy_dev);
 		if (rc < 0) {
-			pr_err("%s:%d Failed in csiphy release\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
 			cam_cpas_stop(csiphy_dev->cpas_handle);
 			goto release_mutex;
 		}
 		rc = cam_cpas_stop(csiphy_dev->cpas_handle);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: de-voting CPAS: %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
 			goto release_mutex;
 		}
 	}
@@ -423,8 +414,7 @@
 		struct cam_release_dev_cmd release;
 
 		if (!csiphy_dev->acquire_count) {
-			pr_err("%s:%d No valid devices to release\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "No valid devices to release");
 			rc = -EINVAL;
 			goto release_mutex;
 		}
@@ -437,8 +427,7 @@
 
 		rc = cam_destroy_device_hdl(release.dev_handle);
 		if (rc < 0)
-			pr_err("%s:%d :ERROR: destroying the device hdl\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "Failed in destroying the device hdl");
 		if (release.dev_handle ==
 			csiphy_dev->bridge_intf.device_hdl[0]) {
 			csiphy_dev->bridge_intf.device_hdl[0] = -1;
@@ -462,8 +451,7 @@
 		} else {
 			rc = cam_cmd_buf_parser(csiphy_dev, &config);
 			if (rc < 0) {
-				pr_err("%s:%d Fail in cmd buf parser\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_CSIPHY, "Fail in cmd buf parser");
 				goto release_mutex;
 			}
 		}
@@ -481,22 +469,19 @@
 		rc = cam_cpas_start(csiphy_dev->cpas_handle,
 			&ahb_vote, &axi_vote);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: voting CPAS: %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_CSIPHY, "voting CPAS: %d", rc);
 			goto release_mutex;
 		}
 
 		rc = cam_csiphy_enable_hw(csiphy_dev);
 		if (rc != 0) {
-			pr_err("%s: %d cam_csiphy_enable_hw failed\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "cam_csiphy_enable_hw failed");
 			cam_cpas_stop(csiphy_dev->cpas_handle);
 			goto release_mutex;
 		}
 		rc = cam_csiphy_config_dev(csiphy_dev);
 		if (rc < 0) {
-			pr_err("%s: %d cam_csiphy_config_dev failed\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "cam_csiphy_config_dev failed");
 			cam_cpas_stop(csiphy_dev->cpas_handle);
 			goto release_mutex;
 		}
@@ -505,8 +490,7 @@
 	case CAM_SD_SHUTDOWN:
 		break;
 	default:
-		pr_err("%s:%d :Error: Invalid Opcode: %d\n",
-			__func__, __LINE__, cmd->op_code);
+		CAM_ERR(CAM_CSIPHY, "Invalid Opcode: %d", cmd->op_code);
 		rc = -EINVAL;
 		goto release_mutex;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
index 7783b2e..1c93a1a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -16,13 +16,6 @@
 #include "cam_csiphy_core.h"
 #include <media/cam_sensor.h>
 
-#undef CDBG
-#ifdef CAM_CSIPHY_DEV_DEBUG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
 static long cam_csiphy_subdev_ioctl(struct v4l2_subdev *sd,
 	unsigned int cmd, void *arg)
 {
@@ -33,13 +26,12 @@
 	case VIDIOC_CAM_CONTROL:
 		rc = cam_csiphy_core_cfg(csiphy_dev, arg);
 		if (rc != 0) {
-			pr_err("%s: %d :ERROR: in configuring the device\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_CSIPHY, "Failed in configuring the device");
 			return rc;
 		}
 		break;
 	default:
-		pr_err("%s:%d :ERROR: Wrong ioctl\n", __func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "Wrong ioctl : %d", cmd);
 		break;
 	}
 
@@ -55,7 +47,7 @@
 
 	if (copy_from_user(&cmd_data, (void __user *)arg,
 		sizeof(cmd_data))) {
-		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+		CAM_ERR(CAM_CSIPHY, "Failed to copy from user_ptr=%pK size=%zu",
 			(void __user *)arg, sizeof(cmd_data));
 		return -EFAULT;
 	}
@@ -68,15 +60,15 @@
 		rc = cam_csiphy_subdev_ioctl(sd, cmd, &cmd_data);
 		break;
 	default:
-		pr_err("%s:%d Invalid compat ioctl cmd: %d\n",
-			__func__, __LINE__, cmd);
+		CAM_ERR(CAM_CSIPHY, "Invalid compat ioctl cmd: %d", cmd);
 		rc = -EINVAL;
 	}
 
 	if (!rc) {
 		if (copy_to_user((void __user *)arg, &cmd_data,
 			sizeof(cmd_data))) {
-			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+			CAM_ERR(CAM_CSIPHY,
+				"Failed to copy to user_ptr=%pK size=%zu",
 				(void __user *)arg, sizeof(cmd_data));
 			rc = -EFAULT;
 		}
@@ -126,8 +118,7 @@
 
 	rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev);
 	if (rc < 0) {
-		pr_err("%s:%d :ERROR: dt parsing failed: %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CSIPHY, "DT parsing failed: %d", rc);
 		goto csiphy_no_resource;
 	}
 
@@ -148,8 +139,7 @@
 
 	rc = cam_register_subdev(&(new_csiphy_dev->v4l2_dev_str));
 	if (rc < 0) {
-		pr_err("%s:%d :ERROR: In cam_register_subdev\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "cam_register_subdev Failed rc: %d", rc);
 		goto csiphy_no_resource;
 	}
 
@@ -176,11 +166,10 @@
 	strlcpy(cpas_parms.identifier, "csiphy", CAM_HW_IDENTIFIER_LENGTH);
 	rc = cam_cpas_register_client(&cpas_parms);
 	if (rc) {
-		pr_err("%s:%d CPAS registration failed\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "CPAS registration failed rc: %d", rc);
 		goto csiphy_no_resource;
 	}
-	CDBG("CPAS registration successful handle=%d\n",
+	CAM_DBG(CAM_CSIPHY, "CPAS registration successful handle=%d",
 		cpas_parms.client_handle);
 	new_csiphy_dev->cpas_handle = cpas_parms.client_handle;
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index c4258bd..8ed5ba4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -34,6 +34,7 @@
 #include <cam_io_util.h>
 #include <cam_cpas_api.h>
 #include "cam_soc_util.h"
+#include "cam_debug_util.h"
 
 #define MAX_CSIPHY                  3
 #define MAX_DPHY_DATA_LN            4
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index 6b5aba9..ea6b7c8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -22,16 +22,16 @@
 	soc_info = &csiphy_dev->soc_info;
 
 	if (csiphy_dev->ref_count++) {
-		pr_err("%s:%d csiphy refcount = %d\n", __func__,
-			__LINE__, csiphy_dev->ref_count);
+		CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+			csiphy_dev->ref_count);
 		return rc;
 	}
 
 	rc = cam_soc_util_enable_platform_resource(soc_info, true,
 		CAM_TURBO_VOTE, ENABLE_IRQ);
 	if (rc < 0) {
-		pr_err("%s:%d failed to enable platform resources %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_CSIPHY, "failed to enable platform resources %d",
+			rc);
 		return rc;
 	}
 
@@ -41,8 +41,7 @@
 		soc_info->clk_rate[0][csiphy_dev->csiphy_clk_index]);
 
 	if (rc < 0) {
-		pr_err("%s:%d csiphy_clk_set_rate failed\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "csiphy_clk_set_rate failed rc: %d", rc);
 		goto csiphy_disable_platform_resource;
 	}
 
@@ -62,15 +61,14 @@
 	struct cam_hw_soc_info   *soc_info;
 
 	if (!csiphy_dev || !csiphy_dev->ref_count) {
-		pr_err("%s:%d csiphy dev NULL / ref_count ZERO\n", __func__,
-			__LINE__);
+		CAM_ERR(CAM_CSIPHY, "csiphy dev NULL / ref_count ZERO");
 		return 0;
 	}
 	soc_info = &csiphy_dev->soc_info;
 
 	if (--csiphy_dev->ref_count) {
-		pr_err("%s:%d csiphy refcount = %d\n", __func__,
-			__LINE__, csiphy_dev->ref_count);
+		CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+			csiphy_dev->ref_count);
 		return 0;
 	}
 
@@ -95,8 +93,7 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: parsing common soc dt(rc %d)\n",
-			 __func__, __LINE__, rc);
+		CAM_ERR(CAM_CSIPHY, "parsing common soc dt(rc %d)", rc);
 		return  rc;
 	}
 
@@ -117,15 +114,15 @@
 		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
 		csiphy_dev->clk_lane = 0;
 	} else {
-		pr_err("%s:%d, invalid hw version : 0x%x\n", __func__, __LINE__,
-		csiphy_dev->hw_version);
+		CAM_ERR(CAM_CSIPHY, "invalid hw version : 0x%x",
+			csiphy_dev->hw_version);
 		rc =  -EINVAL;
 		return rc;
 	}
 
 	if (soc_info->num_clk > CSIPHY_NUM_CLK_MAX) {
-		pr_err("%s:%d invalid clk count=%d, max is %d\n", __func__,
-			__LINE__, soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
+		CAM_ERR(CAM_CSIPHY, "invalid clk count=%d, max is %d",
+			soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
 		return -EINVAL;
 	}
 	for (i = 0; i < soc_info->num_clk; i++) {
@@ -155,7 +152,7 @@
 			soc_info->clk_rate[0][clk_cnt];
 			csiphy_dev->csiphy_clk_index = clk_cnt;
 		}
-		CDBG("%s:%d clk_rate[%d] = %d\n", __func__, __LINE__, clk_cnt,
+		CAM_DBG(CAM_CSIPHY, "clk_rate[%d] = %d", clk_cnt,
 			soc_info->clk_rate[0][clk_cnt]);
 		clk_cnt++;
 	}
@@ -168,7 +165,7 @@
 int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev)
 {
 	if (!csiphy_dev) {
-		pr_err("%s:%d csiphy dev NULL\n", __func__, __LINE__);
+		CAM_ERR(CAM_CSIPHY, "csiphy dev NULL");
 		return 0;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
new file mode 100644
index 0000000..5490992
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom_dev.o cam_eeprom_core.o cam_eeprom_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
new file mode 100644
index 0000000..96697f9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <media/cam_sensor.h>
+
+#include "cam_eeprom_core.h"
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+
+/**
+ * cam_eeprom_read_memory() - read map data into buffer
+ * @e_ctrl:     eeprom control struct
+ * @block:      block to be read
+ *
+ * This function iterates through the entries stored in block->map, reads
+ * each region and concatenates them into the pre-allocated block->mapdata
+ */
+static int cam_eeprom_read_memory(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_memory_block_t *block)
+{
+	int                                rc = 0;
+	int                                j;
+	struct cam_sensor_i2c_reg_setting  i2c_reg_settings;
+	struct cam_sensor_i2c_reg_array    i2c_reg_array;
+	struct cam_eeprom_memory_map_t    *emap = block->map;
+	struct cam_eeprom_soc_private     *eb_info;
+	uint8_t                           *memptr = block->mapdata;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	eb_info = (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	for (j = 0; j < block->num_map; j++) {
+		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X", emap[j].saddr);
+		if (emap[j].saddr) {
+			eb_info->i2c_info.slave_addr = emap[j].saddr;
+			rc = cam_eeprom_update_i2c_info(e_ctrl,
+				&eb_info->i2c_info);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"failed: to update i2c info rc %d",
+					rc);
+				return rc;
+			}
+		}
+
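+		/*
+		 * Each map entry carries optional page, pageen, poll and mem
+		 * regions; only regions with a non-zero valid_size are
+		 * programmed or read below.
+		 */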
+		if (emap[j].page.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].page.addr_type;
+			i2c_reg_settings.data_type = emap[j].page.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].page.addr;
+			i2c_reg_array.reg_data = emap[j].page.data;
+			i2c_reg_array.delay = emap[j].page.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "page write failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		if (emap[j].pageen.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+			i2c_reg_settings.data_type = emap[j].pageen.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].pageen.addr;
+			i2c_reg_array.reg_data = emap[j].pageen.data;
+			i2c_reg_array.delay = emap[j].pageen.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "page enable failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		if (emap[j].poll.valid_size) {
+			rc = camera_io_dev_poll(&e_ctrl->io_master_info,
+				emap[j].poll.addr, emap[j].poll.data,
+				0, emap[j].poll.addr_type,
+				emap[j].poll.data_type,
+				emap[j].poll.delay);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "poll failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		if (emap[j].mem.valid_size) {
+			rc = camera_io_dev_read_seq(&e_ctrl->io_master_info,
+				emap[j].mem.addr, memptr,
+				emap[j].mem.addr_type,
+				emap[j].mem.valid_size);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "read failed rc %d",
+					rc);
+				return rc;
+			}
+			memptr += emap[j].mem.valid_size;
+		}
+
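+		/* Clear the page-enable register once the region is read */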
+		if (emap[j].pageen.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+			i2c_reg_settings.data_type = emap[j].pageen.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].pageen.addr;
+			i2c_reg_array.reg_data = 0;
+			i2c_reg_array.delay = emap[j].pageen.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"page disable failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_power_up - Power up eeprom hardware
+ * @e_ctrl:     ctrl structure
+ * @power_info: power up/down info for eeprom
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_up(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int32_t                 rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&e_ctrl->soc_info;
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		&e_ctrl->soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"failed to fill vreg params for power up rc:%d", rc);
+		return rc;
+	}
+
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		&e_ctrl->soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"failed to fill vreg params power down rc:%d", rc);
+		return rc;
+	}
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed in eeprom power up rc %d", rc);
+		return rc;
+	}
+
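+	/*
+	 * Only CCI based EEPROMs need the I2C master initialized after the
+	 * rails and clocks are up; other master types skip this step.
+	 */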
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		rc = camera_io_init(&(e_ctrl->io_master_info));
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "cci_init failed");
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_power_down - Power down eeprom hardware
+ * @e_ctrl:    ctrl structure
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_down(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info         *soc_info;
+	struct cam_eeprom_soc_private  *soc_private;
+	int                             rc = 0;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "failed: e_ctrl %pK", e_ctrl);
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+	soc_info = &e_ctrl->soc_info;
+
+	if (!power_info) {
+		CAM_ERR(CAM_EEPROM, "failed: power_info %pK", power_info);
+		return -EINVAL;
+	}
+	rc = msm_camera_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed to power down the core: %d", rc);
+		return rc;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER)
+		camera_io_release(&(e_ctrl->io_master_info));
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_match_id - match eeprom id
+ * @e_ctrl:     ctrl structure
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_match_id(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int                      rc;
+	struct camera_io_master *client = &e_ctrl->io_master_info;
+	uint8_t                  id[2];
+
+	rc = cam_spi_query_id(client, 0, &id[0], 2);
+	if (rc)
+		return rc;
+	CAM_DBG(CAM_EEPROM, "read 0x%x 0x%x, check 0x%x 0x%x",
+		id[0], id[1], client->spi_client->mfr_id0,
+		client->spi_client->device_id0);
+	if (id[0] != client->spi_client->mfr_id0
+		|| id[1] != client->spi_client->device_id0)
+		return -ENODEV;
+	return 0;
+}
+
+/**
+ * cam_eeprom_parse_read_memory_map - Parse memory map
+ * @of_node:    device node
+ * @e_ctrl:     ctrl structure
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+	struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int32_t                         rc = 0;
+	struct cam_eeprom_soc_private  *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "failed: e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	rc = cam_eeprom_parse_dt_memory_map(of_node, &e_ctrl->cal_data);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: eeprom dt parse rc %d", rc);
+		return rc;
+	}
+	rc = cam_eeprom_power_up(e_ctrl, power_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: eeprom power up rc %d", rc);
+		goto data_mem_free;
+	}
+	if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) {
+		rc = cam_eeprom_match_id(e_ctrl);
+		if (rc) {
+			CAM_DBG(CAM_EEPROM, "eeprom not matching %d", rc);
+			goto power_down;
+		}
+	}
+	rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "read_eeprom_memory failed");
+		goto power_down;
+	}
+
+	rc = cam_eeprom_power_down(e_ctrl);
+	if (rc)
+		CAM_ERR(CAM_EEPROM, "failed: eeprom power down rc %d", rc);
+	return rc;
+power_down:
+	rc = cam_eeprom_power_down(e_ctrl);
+data_mem_free:
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_dev_handle - get device handle
+ * @e_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl,
+	void *arg)
+{
+	struct cam_sensor_acquire_dev    eeprom_acq_dev;
+	struct cam_create_dev_hdl        bridge_params;
+	struct cam_control              *cmd = (struct cam_control *)arg;
+
+	if (e_ctrl->bridge_intf.device_hdl != -1) {
+		CAM_ERR(CAM_EEPROM, "Device is already acquired");
+		return -EFAULT;
+	}
+	if (copy_from_user(&eeprom_acq_dev, (void __user *) cmd->handle,
+		sizeof(eeprom_acq_dev))) {
+		CAM_ERR(CAM_EEPROM,
+			"EEPROM:ACQUIRE_DEV: copy from user failed");
+		return -EFAULT;
+	}
+
+	bridge_params.session_hdl = eeprom_acq_dev.session_handle;
+	bridge_params.ops = &e_ctrl->bridge_intf.ops;
+	bridge_params.v4l2_sub_dev_flag = 0;
+	bridge_params.media_entity_flag = 0;
+	bridge_params.priv = e_ctrl;
+
+	eeprom_acq_dev.device_handle =
+		cam_create_device_hdl(&bridge_params);
+	e_ctrl->bridge_intf.device_hdl = eeprom_acq_dev.device_handle;
+	e_ctrl->bridge_intf.session_hdl = eeprom_acq_dev.session_handle;
+
+	CAM_DBG(CAM_EEPROM, "Device Handle: %d", eeprom_acq_dev.device_handle);
+	if (copy_to_user((void __user *) cmd->handle, &eeprom_acq_dev,
+		sizeof(struct cam_sensor_acquire_dev))) {
+		CAM_ERR(CAM_EEPROM, "EEPROM:ACQUIRE_DEV: copy to user failed");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * cam_eeprom_update_slaveInfo - Update slave info
+ * @e_ctrl:     ctrl structure
+ * @cmd_buf:    command buffer
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_update_slaveInfo(struct cam_eeprom_ctrl_t *e_ctrl,
+	void *cmd_buf)
+{
+	int32_t                         rc = 0;
+	struct cam_eeprom_soc_private  *soc_private;
+	struct cam_cmd_i2c_info        *cmd_i2c_info = NULL;
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	cmd_i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+	soc_private->i2c_info.slave_addr = cmd_i2c_info->slave_addr;
+	soc_private->i2c_info.i2c_freq_mode = cmd_i2c_info->i2c_freq_mode;
+
+	rc = cam_eeprom_update_i2c_info(e_ctrl,
+		&soc_private->i2c_info);
+	CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x Freq Mode: %d",
+		soc_private->i2c_info.slave_addr,
+		soc_private->i2c_info.i2c_freq_mode);
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_parse_memory_map - Parse memory map info
+ * @data:             memory block data
+ * @cmd_buf:          command buffer
+ * @cmd_length:       command buffer length
+ * @num_map:          memory map size
+ * @cmd_length_bytes: command length processed in this function
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_parse_memory_map(
+	struct cam_eeprom_memory_block_t *data,
+	void *cmd_buf, int cmd_length, uint16_t *cmd_length_bytes,
+	int16_t num_map)
+{
+	int32_t                            rc = 0;
+	int32_t                            processed_size = 0;
+	struct cam_eeprom_memory_map_t    *map = data->map;
+	struct common_header              *cmm_hdr =
+		(struct common_header *)cmd_buf;
+	uint16_t                           cmd_length_in_bytes = 0;
+	struct cam_cmd_i2c_random_wr      *i2c_random_wr = NULL;
+	struct cam_cmd_i2c_continuous_rd  *i2c_cont_rd = NULL;
+	struct cam_cmd_conditional_wait   *i2c_poll = NULL;
+
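+	/* Each command fills one field of the current memory map entry */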
+	switch (cmm_hdr->cmd_type) {
+	case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+		i2c_random_wr = (struct cam_cmd_i2c_random_wr *)cmd_buf;
+		cmd_length_in_bytes   = sizeof(struct cam_cmd_i2c_random_wr);
+
+		map[num_map].page.addr =
+			i2c_random_wr->random_wr_payload[0].reg_addr;
+		map[num_map].page.addr_type = i2c_random_wr->header.addr_type;
+		map[num_map].page.data =
+			i2c_random_wr->random_wr_payload[0].reg_data;
+		map[num_map].page.data_type = i2c_random_wr->header.data_type;
+		map[num_map].page.valid_size = 1;
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size +=
+			cmd_length_in_bytes;
+		break;
+	case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+		i2c_cont_rd = (struct cam_cmd_i2c_continuous_rd *)cmd_buf;
+		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_continuous_rd);
+
+		map[num_map].mem.addr = i2c_cont_rd->reg_addr;
+		map[num_map].mem.addr_type = i2c_cont_rd->header.addr_type;
+		map[num_map].mem.data_type = i2c_cont_rd->header.data_type;
+		map[num_map].mem.valid_size =
+			i2c_cont_rd->header.count;
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size +=
+			cmd_length_in_bytes;
+		data->num_data += map[num_map].mem.valid_size;
+		break;
+	case CAMERA_SENSOR_CMD_TYPE_WAIT:
+		i2c_poll = (struct cam_cmd_conditional_wait *)cmd_buf;
+		cmd_length_in_bytes = sizeof(struct cam_cmd_conditional_wait);
+
+		map[num_map].poll.addr = i2c_poll->reg_addr;
+		map[num_map].poll.addr_type = i2c_poll->addr_type;
+		map[num_map].poll.data = i2c_poll->reg_data;
+		map[num_map].poll.data_type = i2c_poll->data_type;
+		map[num_map].poll.delay = i2c_poll->timeout;
+		map[num_map].poll.valid_size = 1;
+		break;
+	default:
+		break;
+	}
+	*cmd_length_bytes = processed_size;
+	return rc;
+}
+
+/**
+ * cam_eeprom_init_pkt_parser - Parse eeprom packet
+ * @e_ctrl:       ctrl structure
+ * @csl_packet:	  csl packet received
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_packet *csl_packet)
+{
+	int32_t                         rc = 0;
+	int                             i = 0;
+	struct cam_cmd_buf_desc        *cmd_desc = NULL;
+	uint32_t                       *offset = NULL;
+	uint32_t                       *cmd_buf = NULL;
+	uint64_t                        generic_pkt_addr;
+	size_t                          pkt_len = 0;
+	uint32_t                        total_cmd_buf_in_bytes = 0;
+	uint32_t                        processed_cmd_buf_in_bytes = 0;
+	struct common_header           *cmm_hdr = NULL;
+	uint16_t                        cmd_length_in_bytes = 0;
+	struct cam_cmd_i2c_info        *i2c_info = NULL;
+	int                             num_map = -1;
+	struct cam_eeprom_memory_map_t *map;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	e_ctrl->cal_data.map = kcalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
+		MSM_EEPROM_MAX_MEM_MAP_CNT),
+		(sizeof(struct cam_eeprom_memory_map_t)), GFP_KERNEL);
+	if (!e_ctrl->cal_data.map) {
+		rc = -ENOMEM;
+		CAM_ERR(CAM_EEPROM, "failed");
+		return rc;
+	}
+	map = e_ctrl->cal_data.map;
+
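+	/* Command buffer descriptors follow the payload at cmd_buf_offset */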
+	offset = (uint32_t *)&csl_packet->payload;
+	offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+	cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+	/* Loop through multiple command buffers */
+	for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+		total_cmd_buf_in_bytes = cmd_desc[i].length;
+		processed_cmd_buf_in_bytes = 0;
+		if (!total_cmd_buf_in_bytes)
+			continue;
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			(uint64_t *)&generic_pkt_addr, &pkt_len);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
+			return rc;
+		}
+		cmd_buf = (uint32_t *)generic_pkt_addr;
+		if (!cmd_buf) {
+			CAM_ERR(CAM_EEPROM, "invalid cmd buf");
+			return -EINVAL;
+		}
+		cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+		/* Loop through multiple cmd formats in one cmd buffer */
+		while (processed_cmd_buf_in_bytes < total_cmd_buf_in_bytes) {
+			cmm_hdr = (struct common_header *)cmd_buf;
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+				num_map++;
+				map[num_map].saddr = i2c_info->slave_addr;
+				rc = cam_eeprom_update_slaveInfo(e_ctrl,
+					cmd_buf);
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_i2c_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/4;
+				e_ctrl->cal_data.num_map = num_map + 1;
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_power);
+				rc = cam_sensor_update_power_settings(cmd_buf,
+					cmd_length_in_bytes, power_info);
+				processed_cmd_buf_in_bytes +=
+					total_cmd_buf_in_bytes;
+				cmd_buf += total_cmd_buf_in_bytes/4;
+				if (rc) {
+					CAM_ERR(CAM_EEPROM,
+						"Failed: power settings");
+					return rc;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+			case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+			case CAMERA_SENSOR_CMD_TYPE_WAIT:
+				rc = cam_eeprom_parse_memory_map(
+					&e_ctrl->cal_data, cmd_buf,
+					total_cmd_buf_in_bytes,
+					&cmd_length_in_bytes, num_map);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/4;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_cal_data - parse the userspace IO config and copy the
+ *                           read data to share with userspace
+ * @e_ctrl:     ctrl structure
+ * @csl_packet: csl packet received
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_packet *csl_packet)
+{
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t              i = 0;
+	int                   rc = 0;
+	uint64_t              buf_addr;
+	size_t                buf_size;
+	uint8_t               *read_buffer;
+
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+		&csl_packet->payload +
+		csl_packet->io_configs_offset);
+
+	CAM_DBG(CAM_EEPROM, "number of IO configs: %d:",
+		csl_packet->num_io_configs);
+
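+	/* Copy the read calibration data out through the OUTPUT io buffer */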
+	for (i = 0; i < csl_packet->num_io_configs; i++) {
+		CAM_DBG(CAM_EEPROM, "Direction: %d:", io_cfg->direction);
+		if (io_cfg->direction == CAM_BUF_OUTPUT) {
+			rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0],
+				(uint64_t *)&buf_addr, &buf_size);
+			CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n",
+				(void *)buf_addr, buf_size);
+
+			read_buffer = (uint8_t *)buf_addr;
+			if (!read_buffer) {
+				CAM_ERR(CAM_EEPROM,
+					"invalid buffer to copy data");
+				return -EINVAL;
+			}
+			read_buffer += io_cfg->offsets[0];
+
+			if (buf_size < e_ctrl->cal_data.num_data) {
+				CAM_ERR(CAM_EEPROM,
+					"failed to copy, Invalid size");
+				return -EINVAL;
+			}
+
+			CAM_DBG(CAM_EEPROM, "copy the data, len:%d",
+				e_ctrl->cal_data.num_data);
+			memcpy(read_buffer, e_ctrl->cal_data.mapdata,
+					e_ctrl->cal_data.num_data);
+
+		} else {
+			CAM_ERR(CAM_EEPROM, "Invalid direction");
+			rc = -EINVAL;
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_pkt_parse - Parse csl packet
+ * @e_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+	int32_t                         rc = 0;
+	struct cam_control             *ioctl_ctrl = NULL;
+	struct cam_config_dev_cmd       dev_config;
+	uint64_t                        generic_pkt_addr;
+	size_t                          pkt_len;
+	struct cam_packet              *csl_packet = NULL;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+	if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
+		sizeof(dev_config)))
+		return -EFAULT;
+	rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
+		(uint64_t *)&generic_pkt_addr, &pkt_len);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"error in converting command Handle Error: %d", rc);
+		return rc;
+	}
+	csl_packet = (struct cam_packet *)
+		(generic_pkt_addr + dev_config.offset);
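+	/* Only the lower 24 bits of op_code carry the packet opcode */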
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_EEPROM_PACKET_OPCODE_INIT:
+		if (e_ctrl->userspace_probe == false) {
+			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+			CAM_ERR(CAM_EEPROM,
+				"Eeprom already probed at kernel boot");
+			rc = -EINVAL;
+			break;
+		}
+		if (e_ctrl->cal_data.num_data == 0) {
+			rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"Failed in parsing the pkt");
+				goto error;
+			}
+
+			e_ctrl->cal_data.mapdata =
+				kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+			if (!e_ctrl->cal_data.mapdata) {
+				rc = -ENOMEM;
+				CAM_ERR(CAM_EEPROM, "failed");
+				goto error;
+			}
+
+			rc = cam_eeprom_power_up(e_ctrl,
+				&soc_private->power_info);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+				goto memdata_free;
+			}
+
+			rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"read_eeprom_memory failed");
+				goto power_down;
+			}
+
+			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+			rc = cam_eeprom_power_down(e_ctrl);
+		} else {
+			CAM_DBG(CAM_EEPROM, "Already read eeprom");
+		}
+		break;
+	default:
+		break;
+	}
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	return rc;
+power_down:
+	rc = cam_eeprom_power_down(e_ctrl);
+memdata_free:
+	kfree(e_ctrl->cal_data.mapdata);
+error:
+	kfree(e_ctrl->cal_data.map);
+	return rc;
+}
+
+/**
+ * cam_eeprom_driver_cmd - Handle eeprom cmds
+ * @e_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+	int                            rc = 0;
+	struct cam_eeprom_query_cap_t  eeprom_cap;
+	struct cam_control            *cmd = (struct cam_control *)arg;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(e_ctrl->eeprom_mutex));
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP:
+		eeprom_cap.slot_info = e_ctrl->subdev_id;
+		if (e_ctrl->userspace_probe == false)
+			eeprom_cap.eeprom_kernel_probe = true;
+		else
+			eeprom_cap.eeprom_kernel_probe = false;
+
+		if (copy_to_user((void __user *) cmd->handle,
+			&eeprom_cap,
+			sizeof(struct cam_eeprom_query_cap_t))) {
+			CAM_ERR(CAM_EEPROM, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		CAM_DBG(CAM_EEPROM, "eeprom_cap: ID: %d", eeprom_cap.slot_info);
+		break;
+	case CAM_ACQUIRE_DEV:
+		rc = cam_eeprom_get_dev_handle(e_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed to acquire dev");
+			goto release_mutex;
+		}
+		break;
+	case CAM_CONFIG_DEV:
+		rc = cam_eeprom_pkt_parse(e_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed in eeprom pkt Parsing");
+			goto release_mutex;
+		}
+		break;
+	default:
+		CAM_DBG(CAM_EEPROM, "invalid opcode");
+		break;
+	}
+
+release_mutex:
+	mutex_unlock(&(e_ctrl->eeprom_mutex));
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
new file mode 100644
index 0000000..84736df
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_CORE_H_
+#define _CAM_EEPROM_CORE_H_
+
+#include "cam_eeprom_dev.h"
+
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg);
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+	struct cam_eeprom_ctrl_t *e_ctrl);
+#endif
+/* _CAM_EEPROM_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
new file mode 100644
index 0000000..82dcc9c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -0,0 +1,487 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_eeprom_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_eeprom_soc.h"
+#include "cam_eeprom_core.h"
+#include "cam_debug_util.h"
+
+static long cam_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int                       rc     = 0;
+	struct cam_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_eeprom_driver_cmd(e_ctrl, arg);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+
+	return rc;
+}
+
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_i2c_info_t *i2c_info)
+{
+	struct cam_sensor_cci_client        *cci_client = NULL;
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		cci_client = e_ctrl->io_master_info.cci_client;
+		if (!cci_client) {
+			CAM_ERR(CAM_EEPROM, "failed: cci_client %pK",
+				cci_client);
+			return -EINVAL;
+		}
+		cci_client->cci_i2c_master = e_ctrl->cci_i2c_master;
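+		/* CCI expects the 7-bit form of the slave address */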
+		cci_client->sid = (i2c_info->slave_addr) >> 1;
+		cci_client->retries = 3;
+		cci_client->id_map = 0;
+		cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_eeprom_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_EEPROM,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_eeprom_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed in eeprom subdev handling rc %d",
+				rc);
+			return rc;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_EEPROM, "Invalid compat ioctl: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops cam_eeprom_internal_ops;
+
+static struct v4l2_subdev_core_ops cam_eeprom_subdev_core_ops = {
+	.ioctl = cam_eeprom_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_eeprom_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_eeprom_subdev_ops = {
+	.core = &cam_eeprom_subdev_core_ops,
+};
+
+static int cam_eeprom_i2c_driver_probe(struct i2c_client *client,
+	 const struct i2c_device_id *id)
+{
+	int                             rc = 0;
+	struct cam_eeprom_ctrl_t       *e_ctrl = NULL;
+	struct cam_eeprom_soc_private  *soc_private = NULL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_EEPROM, "i2c_check_functionality failed");
+		rc = -ENODEV;
+		goto probe_failure;
+	}
+
+	e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "kzalloc failed");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	soc_private = (struct cam_eeprom_soc_private *)(id->driver_data);
+	if (!soc_private) {
+		CAM_ERR(CAM_EEPROM, "board info NULL");
+		rc = -EINVAL;
+		goto ectrl_free;
+	}
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->userspace_probe = false;
+
+	e_ctrl->eeprom_device_type = MSM_CAMERA_I2C_DEVICE;
+	e_ctrl->io_master_info.master_type = I2C_MASTER;
+	e_ctrl->io_master_info.client = client;
+
+	if (soc_private->i2c_info.slave_addr != 0)
+		e_ctrl->io_master_info.client->addr =
+			soc_private->i2c_info.slave_addr;
+
+	return rc;
+
+ectrl_free:
+	kfree(e_ctrl);
+probe_failure:
+	return rc;
+}
+
+static int cam_eeprom_i2c_driver_remove(struct i2c_client *client)
+{
+	struct v4l2_subdev             *sd = i2c_get_clientdata(client);
+	struct cam_eeprom_ctrl_t       *e_ctrl;
+	struct cam_eeprom_soc_private  *soc_private;
+
+	if (!sd) {
+		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+		return -EINVAL;
+	}
+
+	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_EEPROM, "soc_info.soc_private is NULL");
+		return -EINVAL;
+	}
+
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	kfree(soc_private->power_info.gpio_num_info);
+	kfree(soc_private);
+	kfree(e_ctrl);
+
+	return 0;
+}
+
+static int cam_eeprom_spi_setup(struct spi_device *spi)
+{
+	struct cam_eeprom_ctrl_t       *e_ctrl = NULL;
+	struct cam_sensor_spi_client   *spi_client;
+	struct cam_eeprom_soc_private  *eb_info;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+	int                             rc = 0;
+
+	e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+	if (!e_ctrl)
+		return -ENOMEM;
+
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	e_ctrl->userspace_probe = false;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+
+	spi_client = kzalloc(sizeof(*spi_client), GFP_KERNEL);
+	if (!spi_client) {
+		kfree(e_ctrl);
+		return -ENOMEM;
+	}
+
+	eb_info = kzalloc(sizeof(*eb_info), GFP_KERNEL);
+	if (!eb_info)
+		goto spi_free;
+	e_ctrl->soc_info.soc_private = eb_info;
+
+	e_ctrl->eeprom_device_type = MSM_CAMERA_SPI_DEVICE;
+	e_ctrl->io_master_info.spi_client = spi_client;
+	e_ctrl->io_master_info.master_type = SPI_MASTER;
+	spi_client->spi_master = spi;
+
+	power_info = &eb_info->power_info;
+	power_info->dev = &spi->dev;
+
+	/* set spi instruction info */
+	spi_client->retry_delay = 1;
+	spi_client->retries = 0;
+
+	/* Initialize mutex */
+	mutex_init(&(e_ctrl->eeprom_mutex));
+
+	rc = cam_eeprom_spi_driver_soc_init(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: spi soc init rc %d", rc);
+		goto board_free;
+	}
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = cam_eeprom_parse_read_memory_map(spi->dev.of_node,
+			e_ctrl);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
+			goto board_free;
+		}
+	}
+
+	return rc;
+
+board_free:
+	kfree(e_ctrl->soc_info.soc_private);
+spi_free:
+	kfree(spi_client);
+	kfree(e_ctrl);
+	return rc;
+}
+
+static int cam_eeprom_spi_driver_probe(struct spi_device *spi)
+{
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_0;
+	spi_setup(spi);
+
+	CAM_DBG(CAM_EEPROM, "irq[%d] cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x]",
+		spi->irq, spi->chip_select, (spi->mode & SPI_CPHA) ? 1 : 0,
+		(spi->mode & SPI_CPOL) ? 1 : 0,
+		(spi->mode & SPI_CS_HIGH) ? 1 : 0);
+	CAM_DBG(CAM_EEPROM, "max_speed[%u]", spi->max_speed_hz);
+
+	return cam_eeprom_spi_setup(spi);
+}
+
+static int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
+{
+	struct v4l2_subdev             *sd = spi_get_drvdata(sdev);
+	struct cam_eeprom_ctrl_t       *e_ctrl;
+	struct cam_eeprom_soc_private  *soc_private;
+
+	if (!sd) {
+		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+		return -EINVAL;
+	}
+
+	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	kfree(e_ctrl->io_master_info.spi_client);
+	kfree(e_ctrl->cal_data.mapdata);
+	kfree(e_ctrl->cal_data.map);
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	if (soc_private) {
+		kfree(soc_private->power_info.gpio_num_info);
+		kfree(soc_private);
+	}
+	kfree(e_ctrl);
+
+	return 0;
+}
+
+static int32_t cam_eeprom_platform_driver_probe(
+	struct platform_device *pdev)
+{
+	int32_t                         rc = 0;
+	struct cam_eeprom_ctrl_t       *e_ctrl = NULL;
+	struct cam_eeprom_soc_private  *soc_private = NULL;
+
+	e_ctrl = kzalloc(sizeof(struct cam_eeprom_ctrl_t), GFP_KERNEL);
+	if (!e_ctrl)
+		return -ENOMEM;
+
+	e_ctrl->soc_info.pdev = pdev;
+	e_ctrl->eeprom_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->userspace_probe = false;
+
+	e_ctrl->io_master_info.master_type = CCI_MASTER;
+	e_ctrl->io_master_info.cci_client = kzalloc(
+		sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
+	if (!e_ctrl->io_master_info.cci_client) {
+		rc = -ENOMEM;
+		goto free_e_ctrl;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_eeprom_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_cci_client;
+	}
+	e_ctrl->soc_info.soc_private = soc_private;
+
+	/* Initialize mutex */
+	mutex_init(&(e_ctrl->eeprom_mutex));
+	rc = cam_eeprom_platform_driver_soc_init(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc);
+		goto free_soc;
+	}
+	rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc);
+		goto free_soc;
+	}
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = cam_eeprom_parse_read_memory_map(pdev->dev.of_node,
+			e_ctrl);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
+			goto free_soc;
+		}
+	}
+
+	e_ctrl->v4l2_dev_str.internal_ops = &cam_eeprom_internal_ops;
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	strlcpy(e_ctrl->device_name, CAM_EEPROM_NAME,
+		sizeof(e_ctrl->device_name));
+	e_ctrl->v4l2_dev_str.name = e_ctrl->device_name;
+	e_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	e_ctrl->v4l2_dev_str.ent_function = CAM_EEPROM_DEVICE_TYPE;
+	e_ctrl->v4l2_dev_str.token = e_ctrl;
+
+	rc = cam_register_subdev(&(e_ctrl->v4l2_dev_str));
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "fail to create subdev");
+		goto free_soc;
+	}
+
+	e_ctrl->bridge_intf.device_hdl = -1;
+	e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+	e_ctrl->bridge_intf.ops.link_setup = NULL;
+	e_ctrl->bridge_intf.ops.apply_req = NULL;
+
+	platform_set_drvdata(pdev, e_ctrl);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+	return rc;
+free_soc:
+	kfree(soc_private);
+free_cci_client:
+	kfree(e_ctrl->io_master_info.cci_client);
+free_e_ctrl:
+	kfree(e_ctrl);
+	return rc;
+}
+
+static int cam_eeprom_platform_driver_remove(struct platform_device *pdev)
+{
+	struct cam_eeprom_ctrl_t  *e_ctrl;
+
+	e_ctrl = platform_get_drvdata(pdev);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	kfree(e_ctrl->soc_info.soc_private);
+	kfree(e_ctrl->io_master_info.cci_client);
+	kfree(e_ctrl);
+	return 0;
+}
+
+static const struct of_device_id cam_eeprom_dt_match[] = {
+	{ .compatible = "qcom,eeprom" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, cam_eeprom_dt_match);
+
+static struct platform_driver cam_eeprom_platform_driver = {
+	.driver = {
+		.name = "qcom,eeprom",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_eeprom_dt_match,
+	},
+	.probe = cam_eeprom_platform_driver_probe,
+	.remove = cam_eeprom_platform_driver_remove,
+};
+
+static const struct i2c_device_id cam_eeprom_i2c_id[] = {
+	{ "msm_eeprom", (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_eeprom_i2c_driver = {
+	.id_table = cam_eeprom_i2c_id,
+	.probe  = cam_eeprom_i2c_driver_probe,
+	.remove = cam_eeprom_i2c_driver_remove,
+	.driver = {
+		.name = "msm_eeprom",
+	},
+};
+
+static struct spi_driver cam_eeprom_spi_driver = {
+	.driver = {
+		.name = "qcom_eeprom",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_eeprom_dt_match,
+	},
+	.probe = cam_eeprom_spi_driver_probe,
+	.remove = cam_eeprom_spi_driver_remove,
+};
+
+static int __init cam_eeprom_driver_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&cam_eeprom_platform_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "platform_driver_register failed rc = %d",
+			rc);
+		return rc;
+	}
+
+	rc = spi_register_driver(&cam_eeprom_spi_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "spi_register_driver failed rc = %d", rc);
+		platform_driver_unregister(&cam_eeprom_platform_driver);
+		return rc;
+	}
+
+	rc = i2c_add_driver(&cam_eeprom_i2c_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "i2c_add_driver failed rc = %d", rc);
+		spi_unregister_driver(&cam_eeprom_spi_driver);
+		platform_driver_unregister(&cam_eeprom_platform_driver);
+		return rc;
+	}
+
+	return rc;
+}
+
+static void __exit cam_eeprom_driver_exit(void)
+{
+	platform_driver_unregister(&cam_eeprom_platform_driver);
+	spi_unregister_driver(&cam_eeprom_spi_driver);
+	i2c_del_driver(&cam_eeprom_i2c_driver);
+}
+
+module_init(cam_eeprom_driver_init);
+module_exit(cam_eeprom_driver_exit);
+MODULE_DESCRIPTION("CAM EEPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
new file mode 100644
index 0000000..a98bf00
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -0,0 +1,183 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_DEV_H_
+#define _CAM_EEPROM_DEV_H_
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_sensor.h>
+#include <cam_sensor_i2c.h>
+#include <cam_sensor_spi.h>
+#include <cam_sensor_io.h>
+#include <cam_cci_dev.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include <cam_subdev.h>
+#include "cam_soc_util.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define PROPERTY_MAXSIZE 32
+
+#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE         80
+#define MSM_EEPROM_MAX_MEM_MAP_CNT             8
+#define MSM_EEPROM_MEM_MAP_PROPERTIES_CNT      8
+
+/**
+ * struct cam_eeprom_map_t - eeprom map
+ * @valid_size      :   Valid size
+ * @addr            :   Address
+ * @addr_type       :   Address type
+ * @data            :   Data
+ * @data_type       :   Data type
+ * @delay           :   Delay
+ *
+ */
+struct cam_eeprom_map_t {
+	uint32_t valid_size;
+	uint32_t addr;
+	uint32_t addr_type;
+	uint32_t data;
+	uint32_t data_type;
+	uint32_t delay;
+};
+
+/**
+ * struct cam_eeprom_memory_map_t - eeprom memory map types
+ * @page            :   page memory
+ * @pageen          :   pageen memory
+ * @poll            :   poll memory
+ * @mem             :   mem
+ * @saddr           :   slave addr
+ *
+ */
+struct cam_eeprom_memory_map_t {
+	struct cam_eeprom_map_t page;
+	struct cam_eeprom_map_t pageen;
+	struct cam_eeprom_map_t poll;
+	struct cam_eeprom_map_t mem;
+	uint32_t saddr;
+};
+
+/**
+ * struct cam_eeprom_memory_block_t - eeprom mem block info
+ * @map             :   eeprom memory map
+ * @num_map         :   number of map blocks
+ * @mapdata         :   map data
+ * @num_data        :   size of total mapdata
+ *
+ */
+struct cam_eeprom_memory_block_t {
+	struct cam_eeprom_memory_map_t *map;
+	uint32_t num_map;
+	uint8_t *mapdata;
+	uint32_t num_data;
+};
+
+/**
+ * struct cam_eeprom_cmm_t - camera multimodule
+ * @cmm_support     :   cmm support flag
+ * @cmm_compression :   cmm compression flag
+ * @cmm_offset      :   cmm data start offset
+ * @cmm_size        :   cmm data size
+ *
+ */
+struct cam_eeprom_cmm_t {
+	uint32_t cmm_support;
+	uint32_t cmm_compression;
+	uint32_t cmm_offset;
+	uint32_t cmm_size;
+};
+
+/**
+ * struct cam_eeprom_i2c_info_t - I2C info
+ * @slave_addr      :   slave address
+ * @i2c_freq_mode   :   i2c frequency mode
+ *
+ */
+struct cam_eeprom_i2c_info_t {
+	uint16_t slave_addr;
+	uint8_t i2c_freq_mode;
+};
+
+/**
+ * struct cam_eeprom_soc_private - eeprom soc private data structure
+ * @eeprom_name     :   eeprom name
+ * @i2c_info        :   i2c info structure
+ * @power_info      :   eeprom power info
+ * @cmm_data        :   cmm data
+ *
+ */
+struct cam_eeprom_soc_private {
+	const char *eeprom_name;
+	struct cam_eeprom_i2c_info_t i2c_info;
+	struct cam_sensor_power_ctrl_t power_info;
+	struct cam_eeprom_cmm_t cmm_data;
+};
+
+/**
+ * struct cam_eeprom_intf_params - bridge interface params
+ * @device_hdl   : Device Handle
+ * @session_hdl  : Session Handle
+ * @ops          : KMD operations
+ * @crm_cb       : Callback API pointers
+ */
+struct cam_eeprom_intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_eeprom_ctrl_t - EEPROM control structure
+ * @pdev            :   platform device
+ * @spi             :   spi device
+ * @eeprom_mutex    :   eeprom mutex
+ * @soc_info        :   eeprom soc related info
+ * @io_master_info  :   Information about the communication master
+ * @gpio_num_info   :   gpio info
+ * @cci_i2c_master  :   CCI I2C master index
+ * @v4l2_dev_str    :   V4L2 device structure
+ * @bridge_intf     :   bridge interface params
+ * @eeprom_device_type : eeprom device type
+ * @subdev_id       :   subdev id
+ * @userspace_probe :   flag indicates userspace or kernel probe
+ * @cal_data        :   Calibration data
+ * @device_name     :   Device name
+ *
+ */
+struct cam_eeprom_ctrl_t {
+	struct platform_device *pdev;
+	struct spi_device *spi;
+	struct mutex eeprom_mutex;
+	struct cam_hw_soc_info soc_info;
+	struct camera_io_master io_master_info;
+	struct msm_camera_gpio_num_info *gpio_num_info;
+	enum cci_i2c_master_t cci_i2c_master;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_eeprom_intf_params bridge_intf;
+	enum msm_camera_device_type_t eeprom_device_type;
+	uint32_t subdev_id;
+	bool userspace_probe;
+	struct cam_eeprom_memory_block_t cal_data;
+	char device_name[20];
+};
+
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_i2c_info_t *i2c_info);
+
+#endif /*_CAM_EEPROM_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
new file mode 100644
index 0000000..84e723f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+
+/*
+ * cam_eeprom_parse_dt_memory_map() - parse memory map from the device node
+ * @node:       device node
+ * @data:       memory block for output
+ *
+ * This function parses @node to fill @data. It allocates the map itself,
+ * calculates the total data length, and allocates the required buffer.
+ * It only fills the map; it does not perform the actual reading.
+ */
+int cam_eeprom_parse_dt_memory_map(struct device_node *node,
+	struct cam_eeprom_memory_block_t *data)
+{
+	int       i, rc = 0;
+	char      property[PROPERTY_MAXSIZE];
+	uint32_t  count = MSM_EEPROM_MEM_MAP_PROPERTIES_CNT;
+	struct    cam_eeprom_memory_map_t *map;
+
+	snprintf(property, PROPERTY_MAXSIZE, "num-blocks");
+	rc = of_property_read_u32(node, property, &data->num_map);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed: num-blocks not available rc %d",
+			rc);
+		return rc;
+	}
+
+	map = kzalloc((sizeof(*map) * data->num_map), GFP_KERNEL);
+	if (!map) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	data->map = map;
+
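+	/* Block i is described by page/pageen/saddr/poll/mem properties */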
+	for (i = 0; i < data->num_map; i++) {
+		snprintf(property, PROPERTY_MAXSIZE, "page%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].page, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: page not available rc %d",
+				rc);
+			goto ERROR;
+		}
+
+		snprintf(property, PROPERTY_MAXSIZE, "pageen%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].pageen, count);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "pageen not needed");
+
+		snprintf(property, PROPERTY_MAXSIZE, "saddr%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].saddr, 1);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "saddr not needed - block %d", i);
+
+		snprintf(property, PROPERTY_MAXSIZE, "poll%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].poll, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: poll not available rc %d",
+				rc);
+			goto ERROR;
+		}
+
+		snprintf(property, PROPERTY_MAXSIZE, "mem%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].mem, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: mem not available rc %d",
+				rc);
+			goto ERROR;
+		}
+		data->num_data += map[i].mem.valid_size;
+	}
+
+	data->mapdata = kzalloc(data->num_data, GFP_KERNEL);
+	if (!data->mapdata) {
+		rc = -ENOMEM;
+		goto ERROR;
+	}
+	return rc;
+
+ERROR:
+	kfree(data->map);
+	memset(data, 0, sizeof(*data));
+	return rc;
+}
+
+/**
+ * @e_ctrl: ctrl structure
+ *
+ * Parses eeprom dt
+ */
+static int cam_eeprom_get_dt_data(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int                             rc = 0;
+	struct cam_hw_soc_info         *soc_info = &e_ctrl->soc_info;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+	struct device_node             *of_node = NULL;
+
+	if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE)
+		of_node = e_ctrl->io_master_info.
+			spi_client->spi_master->dev.of_node;
+	else if (e_ctrl->eeprom_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+		of_node = soc_info->pdev->dev.of_node;
+
+	if (!of_node) {
+		CAM_ERR(CAM_EEPROM, "of_node is NULL, device type %d",
+			e_ctrl->eeprom_device_type);
+		return -EINVAL;
+	}
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "cam_soc_util_get_dt_properties rc %d",
+			rc);
+		return rc;
+	}
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = cam_get_dt_power_setting_data(of_node,
+			soc_info, power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed in getting power settings");
+			return rc;
+		}
+	}
+
+	if (!soc_info->gpio_data) {
+		CAM_INFO(CAM_EEPROM, "No GPIO found");
+		return 0;
+	}
+
+	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+		CAM_INFO(CAM_EEPROM, "No GPIO found");
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+		&power_info->gpio_num_info);
+	if ((rc < 0) || (!power_info->gpio_num_info)) {
+		CAM_ERR(CAM_EEPROM, "No/Error EEPROM GPIOs");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * @eb_info: eeprom private data structure
+ * @of_node: eeprom device node
+ *
+ * This function parses the eeprom dt to get the MM data
+ */
+static int cam_eeprom_cmm_dts(struct cam_eeprom_soc_private *eb_info,
+	struct device_node *of_node)
+{
+	int                      rc = 0;
+	struct cam_eeprom_cmm_t *cmm_data = &eb_info->cmm_data;
+
+	cmm_data->cmm_support =
+		of_property_read_bool(of_node, "cmm-data-support");
+	if (!cmm_data->cmm_support) {
+		CAM_DBG(CAM_EEPROM, "No cmm support");
+		return 0;
+	}
+
+	cmm_data->cmm_compression =
+		of_property_read_bool(of_node, "cmm-data-compressed");
+
+	rc = of_property_read_u32(of_node, "cmm-data-offset",
+		&cmm_data->cmm_offset);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "No MM offset data rc %d", rc);
+
+	rc = of_property_read_u32(of_node, "cmm-data-size",
+		&cmm_data->cmm_size);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "No MM size data rc %d", rc);
+
+	CAM_DBG(CAM_EEPROM, "cmm_compr %d, cmm_offset %d, cmm_size %d",
+		cmm_data->cmm_compression, cmm_data->cmm_offset,
+		cmm_data->cmm_size);
+	return 0;
+}
+
+/**
+ * @e_ctrl: ctrl structure
+ *
+ * This function is called from cam_eeprom_spi_driver_probe, it parses
+ * the eeprom dt node and decides for userspace or kernel probe.
+ */
+int cam_eeprom_spi_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int                             rc = 0;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	rc = of_property_read_u32(e_ctrl->spi->dev.of_node, "cell-index",
+		&e_ctrl->subdev_id);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+		return rc;
+	}
+	rc = of_property_read_string(e_ctrl->spi->dev.of_node,
+		"eeprom-name", &soc_private->eeprom_name);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+		e_ctrl->userspace_probe = true;
+	}
+
+	CAM_DBG(CAM_EEPROM, "eeprom-name %s, rc %d", soc_private->eeprom_name,
+		rc);
+	rc = cam_eeprom_cmm_dts(soc_private,
+		e_ctrl->io_master_info.spi_client->spi_master->dev.of_node);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "MM data not available rc %d", rc);
+	rc = cam_eeprom_get_dt_data(e_ctrl);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc);
+
+	return rc;
+}
+
+/**
+ * @e_ctrl: ctrl structure
+ *
+ * This function is called from cam_eeprom_platform_driver_probe, it parses
+ * the eeprom dt node and decides for userspace or kernel probe.
+ */
+int cam_eeprom_platform_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int                             rc = 0;
+	struct cam_hw_soc_info         *soc_info = &e_ctrl->soc_info;
+	struct device_node             *of_node = NULL;
+	struct platform_device         *pdev = NULL;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	uint32_t                        temp;
+
+	if (!soc_info->pdev) {
+		CAM_ERR(CAM_EEPROM, "Error: soc_info is not initialized");
+		return -EINVAL;
+	}
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+	if (!of_node) {
+		CAM_ERR(CAM_EEPROM, "dev.of_node NULL");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(of_node, "cell-index",
+		&e_ctrl->subdev_id);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(of_node, "cci-master",
+		&e_ctrl->cci_i2c_master);
+	if (rc < 0) {
+		CAM_DBG(CAM_EEPROM, "failed rc %d", rc);
+		return rc;
+	}
+
+	rc = of_property_read_string(of_node, "eeprom-name",
+		&soc_private->eeprom_name);
+	if (rc < 0) {
+		CAM_DBG(CAM_EEPROM, "kernel probe is not enabled");
+		e_ctrl->userspace_probe = true;
+	}
+
+	rc = cam_eeprom_get_dt_data(e_ctrl);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc);
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = of_property_read_u32(of_node, "slave-addr", &temp);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "failed: no slave-addr rc %d", rc);
+
+		soc_private->i2c_info.slave_addr = temp;
+
+		rc = of_property_read_u32(of_node, "i2c-freq-mode", &temp);
+		soc_private->i2c_info.i2c_freq_mode = temp;
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM,
+				"i2c-freq-mode read fail %d", rc);
+			soc_private->i2c_info.i2c_freq_mode = 0;
+		}
+		if (soc_private->i2c_info.i2c_freq_mode >= I2C_MAX_MODES) {
+			CAM_ERR(CAM_EEPROM, "invalid i2c_freq_mode = %d",
+				soc_private->i2c_info.i2c_freq_mode);
+			soc_private->i2c_info.i2c_freq_mode = 0;
+		}
+		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X",
+			soc_private->i2c_info.slave_addr);
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
new file mode 100644
index 0000000..02e59d7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_EEPROM_SOC_H_
+#define _CAM_EEPROM_SOC_H_
+
+#include "cam_eeprom_dev.h"
+
+int cam_eeprom_parse_dt_memory_map(struct device_node *of,
+	struct cam_eeprom_memory_block_t *data);
+
+int cam_eeprom_platform_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl);
+int cam_eeprom_spi_driver_soc_init(struct cam_eeprom_ctrl_t *e_ctrl);
+#endif/* _CAM_EEPROM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
new file mode 100644
index 0000000..9aab0e4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash_dev.o cam_flash_core.o cam_flash_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
new file mode 100644
index 0000000..7af7efc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -0,0 +1,741 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_flash_core.h"
+
+int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+	enum cam_flash_state state)
+{
+	int rc = 0;
+
+	if (!(flash_ctrl->switch_trigger)) {
+		CAM_ERR(CAM_FLASH, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if ((state == CAM_FLASH_STATE_INIT) &&
+		(flash_ctrl->is_regulator_enabled == false)) {
+		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+			ENABLE_REGULATOR, NULL);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "regulator enable failed rc = %d",
+				rc);
+			return rc;
+		}
+		flash_ctrl->is_regulator_enabled = true;
+	} else if ((state == CAM_FLASH_STATE_RELEASE) &&
+		(flash_ctrl->is_regulator_enabled == true)) {
+		rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+			DISABLE_REGULATOR, NULL);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "regulator disable failed rc = %d",
+				rc);
+			return rc;
+		}
+		flash_ctrl->is_regulator_enabled = false;
+	} else {
+		CAM_ERR(CAM_FLASH, "Wrong Flash State : %d",
+			flash_ctrl->flash_state);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data, enum camera_flash_opcode op)
+{
+	uint32_t curr = 0, max_current = 0;
+	struct cam_flash_private_soc *soc_private = NULL;
+	int i = 0;
+
+	if (!flash_ctrl || !flash_data) {
+		CAM_ERR(CAM_FLASH, "Fctrl or Data NULL");
+		return -EINVAL;
+	}
+
+	soc_private = (struct cam_flash_private_soc *)
+		flash_ctrl->soc_info.soc_private;
+
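+	/* FIRELOW drives the torch sources, FIREHIGH the flash sources */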
+	if (op == CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+		for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+			if (flash_ctrl->torch_trigger[i]) {
+				max_current = soc_private->torch_max_current[i];
+
+				if (flash_data->led_current_ma[i] <=
+					max_current)
+					curr = flash_data->led_current_ma[i];
+				else
+					curr = soc_private->torch_op_current[i];
+
+				CAM_DBG(CAM_FLASH,
+					"Led_Current[%d] = %d", i, curr);
+				led_trigger_event(flash_ctrl->torch_trigger[i],
+					curr);
+			}
+		}
+	} else if (op == CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
+		for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+			if (flash_ctrl->flash_trigger[i]) {
+				max_current = soc_private->flash_max_current[i];
+
+				if (flash_data->led_current_ma[i] <=
+					max_current)
+					curr = flash_data->led_current_ma[i];
+				else
+					curr = soc_private->flash_op_current[i];
+
+				CAM_DBG(CAM_FLASH, "LED flash_current[%d]: %d",
+					i, curr);
+				led_trigger_event(flash_ctrl->flash_trigger[i],
+					curr);
+			}
+		}
+	} else {
+		CAM_ERR(CAM_FLASH, "Wrong Operation: %d", op);
+		return -EINVAL;
+	}
+
+	if (flash_ctrl->switch_trigger)
+		led_trigger_event(flash_ctrl->switch_trigger, LED_SWITCH_ON);
+
+	return 0;
+}
+
+int cam_flash_off(struct cam_flash_ctrl *flash_ctrl)
+{
+	int i = 0;
+
+	if (!flash_ctrl) {
+		CAM_ERR(CAM_FLASH, "Flash control Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+		if (flash_ctrl->flash_trigger[i])
+			led_trigger_event(flash_ctrl->flash_trigger[i],
+				LED_OFF);
+
+	for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+		if (flash_ctrl->torch_trigger[i])
+			led_trigger_event(flash_ctrl->torch_trigger[i],
+				LED_OFF);
+
+	if (flash_ctrl->switch_trigger)
+		led_trigger_event(flash_ctrl->switch_trigger,
+			LED_SWITCH_OFF);
+
+	return 0;
+}
+
+static int cam_flash_low(
+	struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data)
+{
+	int i = 0, rc = 0;
+
+	if (!flash_data) {
+		CAM_ERR(CAM_FLASH, "Flash Data Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+		if (flash_ctrl->flash_trigger[i])
+			led_trigger_event(flash_ctrl->flash_trigger[i],
+				LED_OFF);
+
+	rc = cam_flash_ops(flash_ctrl, flash_data,
+		CAMERA_SENSOR_FLASH_OP_FIRELOW);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fire Torch failed: %d", rc);
+
+	return rc;
+}
+
+static int cam_flash_high(
+	struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data)
+{
+	int i = 0, rc = 0;
+
+	if (!flash_data) {
+		CAM_ERR(CAM_FLASH, "Flash Data Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+		if (flash_ctrl->torch_trigger[i])
+			led_trigger_event(flash_ctrl->torch_trigger[i],
+				LED_OFF);
+
+	rc = cam_flash_ops(flash_ctrl, flash_data,
+		CAMERA_SENSOR_FLASH_OP_FIREHIGH);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fire Flash Failed: %d", rc);
+
+	return rc;
+}
+
+static int delete_req(struct cam_flash_ctrl *fctrl, uint64_t req_id)
+{
+	int i = 0;
+	int frame_offset = 0;
+	struct cam_flash_frame_setting *flash_data = NULL;
+
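+	/* req_id 0 refers to the non-real-time (init/widget/RER) settings */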
+	if (req_id == 0) {
+		flash_data = &fctrl->nrt_info;
+		if ((fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
+			(fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
+			flash_data->cmn_attr.is_settings_valid = false;
+			for (i = 0; i < flash_data->cmn_attr.count; i++)
+				flash_data->led_current_ma[i] = 0;
+		} else {
+			fctrl->flash_init_setting.cmn_attr.
+				is_settings_valid = false;
+		}
+	} else {
+		frame_offset = (req_id + MAX_PER_FRAME_ARRAY -
+			CAM_FLASH_PIPELINE_DELAY) % MAX_PER_FRAME_ARRAY;
+		flash_data = &fctrl->per_frame[frame_offset];
+		if (req_id > flash_data->cmn_attr.request_id) {
+			flash_data->cmn_attr.request_id = 0;
+			flash_data->cmn_attr.is_settings_valid = false;
+			for (i = 0; i < flash_data->cmn_attr.count; i++)
+				flash_data->led_current_ma[i] = 0;
+		}
+	}
+
+	return 0;
+}
+
+int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	int rc = 0, i = 0;
+	int frame_offset = 0;
+	uint16_t num_iterations;
+	struct cam_flash_frame_setting *flash_data = NULL;
+
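+	/* req_id 0 applies non-real-time settings; others index per_frame */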
+	if (req_id == 0) {
+		if (fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) {
+			flash_data = &fctrl->nrt_info;
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+				if (!(fctrl->is_regulator_enabled)) {
+					rc = cam_flash_prepare(fctrl,
+						CAM_FLASH_STATE_INIT);
+					if (rc) {
+						CAM_ERR(CAM_FLASH,
+							"Reg Enable Failed %d",
+							rc);
+						goto nrt_del_req;
+					}
+					fctrl->flash_state =
+						CAM_FLASH_STATE_INIT;
+					rc = cam_flash_low(fctrl, flash_data);
+					if (rc) {
+						CAM_ERR(CAM_FLASH,
+							"Torch ON failed : %d",
+							rc);
+						goto nrt_del_req;
+					}
+					fctrl->flash_state =
+						CAM_FLASH_STATE_LOW;
+				}
+			} else if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_OFF) {
+				if (fctrl->flash_state !=
+					CAM_FLASH_STATE_INIT) {
+					rc = cam_flash_off(fctrl);
+					if (rc)
+						CAM_ERR(CAM_FLASH,
+							"LED off failed: %d",
+							rc);
+				}
+
+				rc = cam_flash_prepare(fctrl,
+					CAM_FLASH_STATE_RELEASE);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Regulator Disable failed %d",
+						rc);
+					goto nrt_del_req;
+				}
+
+				fctrl->flash_state =
+					CAM_FLASH_STATE_RELEASE;
+				fctrl->is_regulator_enabled = false;
+			}
+		} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
+			flash_data = &fctrl->nrt_info;
+
+			if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash off failed: %d",
+						rc);
+				} else {
+					fctrl->flash_state =
+						CAM_FLASH_STATE_INIT;
+				}
+			}
+
+			num_iterations = flash_data->num_iterations;
+			for (i = 0; i < num_iterations; i++) {
+				/* Turn On Torch */
+				if (fctrl->flash_state ==
+					CAM_FLASH_STATE_INIT) {
+					rc = cam_flash_low(fctrl, flash_data);
+					if (rc) {
+						CAM_ERR(CAM_FLASH,
+							"Fire Torch Failed");
+						goto nrt_del_req;
+					}
+					fctrl->flash_state =
+						CAM_FLASH_STATE_LOW;
+				}
+				usleep_range(
+				flash_data->led_on_delay_ms * 1000,
+				flash_data->led_on_delay_ms * 1000 + 100);
+
+				/* Turn Off Torch */
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash off failed: %d",
+						rc);
+					continue;
+				}
+				fctrl->flash_state = CAM_FLASH_STATE_INIT;
+				usleep_range(
+				flash_data->led_off_delay_ms * 1000,
+				flash_data->led_off_delay_ms * 1000 + 100);
+			}
+		}
+	} else {
+		frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+		flash_data = &fctrl->per_frame[frame_offset];
+
+		if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) &&
+			(flash_data->cmn_attr.is_settings_valid)) {
+			/* Turn On Flash */
+			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+				rc = cam_flash_high(fctrl, flash_data);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash ON failed: rc= %d",
+						rc);
+					goto apply_setting_err;
+				}
+				fctrl->flash_state = CAM_FLASH_STATE_HIGH;
+			}
+		} else if ((flash_data->opcode ==
+			CAMERA_SENSOR_FLASH_OP_FIRELOW) &&
+			(flash_data->cmn_attr.is_settings_valid)) {
+			/* Turn On Torch */
+			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
+				rc = cam_flash_low(fctrl, flash_data);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Torch ON failed: rc= %d",
+						rc);
+					goto apply_setting_err;
+				}
+				fctrl->flash_state = CAM_FLASH_STATE_LOW;
+			}
+		} else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
+			(flash_data->cmn_attr.is_settings_valid)) {
+			if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) &&
+				(fctrl->flash_state != CAM_FLASH_STATE_INIT)) {
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash off failed %d", rc);
+				} else {
+					fctrl->flash_state =
+						CAM_FLASH_STATE_INIT;
+				}
+			}
+		} else {
+			CAM_ERR(CAM_FLASH, "Wrong opcode : %d",
+				flash_data->opcode);
+			rc = -EINVAL;
+			goto apply_setting_err;
+		}
+	}
+
+nrt_del_req:
+	delete_req(fctrl, req_id);
+apply_setting_err:
+	return rc;
+}
+
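+/*
+ * cam_flash_parser() - unpack a CSL packet received through CAM_CONFIG_DEV.
+ * INIT packets record the flash type, SET_OPS packets are stored per frame
+ * and registered with the request manager, while the non-real-time opcodes
+ * (widget, query-current, RER) are applied immediately.
+ */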
+int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+	int rc = 0, i = 0;
+	uint64_t generic_ptr;
+	uint32_t *cmd_buf = NULL;
+	uint32_t *offset = NULL;
+	uint32_t frame_offset = 0;
+	size_t len_of_buffer;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct common_header *cmn_hdr;
+	struct cam_config_dev_cmd config;
+	struct cam_req_mgr_add_request add_req;
+	struct cam_flash_init *cam_flash_info = NULL;
+	struct cam_flash_set_rer *flash_rer_info = NULL;
+	struct cam_flash_set_on_off *flash_operation_info = NULL;
+	struct cam_flash_query_curr *flash_query_info = NULL;
+
+	if (!fctrl || !arg) {
+		CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+		return -EINVAL;
+	}
+	/* getting CSL Packet */
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle,
+		sizeof(config))) {
+		CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+		rc = -EFAULT;
+		return rc;
+	}
+
+	rc = cam_mem_get_cpu_buf(config.packet_handle,
+		(uint64_t *)&generic_ptr, &len_of_buffer);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+		return rc;
+	}
+
+	csl_packet = (struct cam_packet *)generic_ptr;
+
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_FLASH_PACKET_OPCODE_INIT: {
+		/* INIT packet*/
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		fctrl->flash_init_setting.cmn_attr.request_id = 0;
+		fctrl->flash_init_setting.cmn_attr.is_settings_valid = true;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			(uint64_t *)&generic_ptr, &len_of_buffer);
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+		cam_flash_info = (struct cam_flash_init *)cmd_buf;
+
+		switch (cam_flash_info->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT:
+			fctrl->flash_type = cam_flash_info->flash_type;
+			fctrl->is_regulator_enabled = false;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_INIT;
+			break;
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+				cam_flash_info->cmd_type);
+			return -EINVAL;
+		}
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		frame_offset = csl_packet->header.request_id %
+			MAX_PER_FRAME_ARRAY;
+		fctrl->per_frame[frame_offset].cmn_attr.request_id =
+			csl_packet->header.request_id;
+		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+			true;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			(uint64_t *)&generic_ptr, &len_of_buffer);
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+		cmn_hdr = (struct common_header *)cmd_buf;
+
+		switch (cmn_hdr->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
+			CAM_DBG(CAM_FLASH,
+				"CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE case");
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			fctrl->per_frame[frame_offset].opcode =
+				flash_operation_info->opcode;
+			fctrl->per_frame[frame_offset].cmn_attr.count =
+				flash_operation_info->count;
+			for (i = 0; i < flash_operation_info->count; i++)
+				fctrl->per_frame[frame_offset].led_current_ma[i]
+					= flash_operation_info->
+					led_current_ma[i];
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+				cmn_hdr->cmd_type);
+			return -EINVAL;
+		}
+
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		fctrl->nrt_info.cmn_attr.is_settings_valid = true;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			(uint64_t *)&generic_ptr, &len_of_buffer);
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+		cmn_hdr = (struct common_header *)cmd_buf;
+
+		switch (cmn_hdr->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET: {
+			CAM_DBG(CAM_FLASH, "Widget Flash Operation");
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			fctrl->nrt_info.cmn_attr.count =
+				flash_operation_info->count;
+			fctrl->nrt_info.cmn_attr.request_id = 0;
+			fctrl->nrt_info.opcode =
+				flash_operation_info->opcode;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET;
+
+			for (i = 0; i < flash_operation_info->count; i++)
+				fctrl->nrt_info.led_current_ma[i] =
+					flash_operation_info->led_current_ma[i];
+
+			mutex_lock(&fctrl->flash_wq_mutex);
+			rc = cam_flash_apply_setting(fctrl, 0);
+			if (rc)
+				CAM_ERR(CAM_FLASH, "Apply setting failed: %d",
+					rc);
+			mutex_unlock(&fctrl->flash_wq_mutex);
+			return rc;
+		}
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: {
+			int query_curr_ma = 0;
+
+			flash_query_info =
+				(struct cam_flash_query_curr *)cmd_buf;
+
+			rc = qpnp_flash_led_prepare(fctrl->switch_trigger,
+				QUERY_MAX_CURRENT, &query_curr_ma);
+			CAM_DBG(CAM_FLASH, "query_curr_ma = %d",
+				query_curr_ma);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+				"Query current failed with rc=%d", rc);
+				return rc;
+			}
+			flash_query_info->query_current_ma = query_curr_ma;
+			break;
+		}
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_RER: {
+			rc = 0;
+			flash_rer_info = (struct cam_flash_set_rer *)cmd_buf;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_RER;
+			fctrl->nrt_info.opcode = flash_rer_info->opcode;
+			fctrl->nrt_info.cmn_attr.count = flash_rer_info->count;
+			fctrl->nrt_info.cmn_attr.request_id = 0;
+			fctrl->nrt_info.num_iterations =
+				flash_rer_info->num_iteration;
+			fctrl->nrt_info.led_on_delay_ms =
+				flash_rer_info->led_on_delay_ms;
+			fctrl->nrt_info.led_off_delay_ms =
+				flash_rer_info->led_off_delay_ms;
+
+			for (i = 0; i < flash_rer_info->count; i++)
+				fctrl->nrt_info.led_current_ma[i] =
+					flash_rer_info->led_current_ma[i];
+
+			mutex_lock(&fctrl->flash_wq_mutex);
+			rc = cam_flash_apply_setting(fctrl, 0);
+			if (rc)
+				CAM_ERR(CAM_FLASH, "apply_setting failed: %d",
+					rc);
+			mutex_unlock(&fctrl->flash_wq_mutex);
+			return rc;
+		}
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type : %d",
+				cmn_hdr->cmd_type);
+			return -EINVAL;
+		}
+
+		break;
+	}
+	case CAM_PKT_NOP_OPCODE: {
+		goto update_req_mgr;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+			(csl_packet->header.op_code & 0xFFFFFF));
+		return -EINVAL;
+	}
+update_req_mgr:
+	if (((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_PKT_NOP_OPCODE) ||
+		((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+		add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+		add_req.req_id = csl_packet->header.request_id;
+		add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+		if (fctrl->bridge_intf.crm_cb &&
+			fctrl->bridge_intf.crm_cb->add_req)
+			fctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+	}
+
+	return rc;
+}
+
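+/* Report the flash device id and pipeline delay to the request manager */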
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	info->dev_id = CAM_REQ_MGR_DEVICE_FLASH;
+	strlcpy(info->name, CAM_FLASH_NAME, sizeof(info->name));
+	info->p_delay = CAM_FLASH_PIPELINE_DELAY;
+	return 0;
+}
+
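+/* CRM link setup callback: store or clear the link handle and CRM callbacks */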
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_flash_ctrl *fctrl = NULL;
+
+	if (!link)
+		return -EINVAL;
+
+	fctrl = (struct cam_flash_ctrl *)cam_get_device_priv(link->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (link->link_enable) {
+		fctrl->bridge_intf.link_hdl = link->link_hdl;
+		fctrl->bridge_intf.crm_cb = link->crm_cb;
+	} else {
+		fctrl->bridge_intf.link_hdl = -1;
+		fctrl->bridge_intf.crm_cb = NULL;
+	}
+
+	return 0;
+}
+
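+/* Clear any pending non-real-time (init/widget/RER) settings on flush */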
+static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+	int j = 0;
+	struct cam_flash_frame_setting *nrt_settings;
+
+	if (!fctrl)
+		return -EINVAL;
+
+	nrt_settings = &fctrl->nrt_info;
+
+	if (nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_INIT) {
+		fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
+	} else if ((nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
+		(nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
+		fctrl->nrt_info.cmn_attr.is_settings_valid = false;
+		fctrl->nrt_info.cmn_attr.count = 0;
+		fctrl->nrt_info.num_iterations = 0;
+		fctrl->nrt_info.led_on_delay_ms = 0;
+		fctrl->nrt_info.led_off_delay_ms = 0;
+		for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+			fctrl->nrt_info.led_current_ma[j] = 0;
+	}
+
+	return 0;
+}
+
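+/* CRM flush callback: drop all per-frame settings, or only the slot for
+ * the flushed req_id.
+ */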
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+	int rc = 0;
+	int i = 0, j = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+	int frame_offset = 0;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		/* Flush all requests */
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			fctrl->per_frame[i].cmn_attr.request_id = 0;
+			fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
+			fctrl->per_frame[i].cmn_attr.count = 0;
+			for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+				fctrl->per_frame[i].led_current_ma[j] = 0;
+		}
+
+		rc = cam_flash_flush_nrt(fctrl);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "NonRealTime flush error");
+	} else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		/* Flush request with req_id */
+		frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
+		fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+			false;
+		fctrl->per_frame[frame_offset].cmn_attr.count = 0;
+		for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
+			fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+	}
+	return rc;
+}
+
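+/* CRM apply callback: apply the per-frame setting unless a bubble is being
+ * reported for this request.
+ */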
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(apply->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		rc = -EINVAL;
+		goto free_resource;
+	}
+
+	if (!(apply->report_if_bubble)) {
+		mutex_lock(&fctrl->flash_wq_mutex);
+		rc = cam_flash_apply_setting(fctrl, apply->request_id);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d",
+				rc);
+		mutex_unlock(&fctrl->flash_wq_mutex);
+	}
+
+free_resource:
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
new file mode 100644
index 0000000..4b0cf8d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FLASH_CORE_H_
+#define _CAM_FLASH_CORE_H_
+
+#include <linux/leds-qpnp-flash.h>
+#include <media/cam_sensor.h>
+#include "cam_flash_dev.h"
+#include "cam_sync_api.h"
+#include "cam_mem_mgr_api.h"
+
+int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info);
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
+int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply);
+int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data);
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush);
+int cam_flash_off(struct cam_flash_ctrl *fctrl);
+int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+	enum cam_flash_state state);
+
+#endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
new file mode 100644
index 0000000..32df2f1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include "cam_flash_dev.h"
+#include "cam_flash_soc.h"
+#include "cam_flash_core.h"
+
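+/*
+ * Handle VIDIOC_CAM_CONTROL opcodes (ACQUIRE/RELEASE/QUERY_CAP/START/STOP/
+ * CONFIG) for the flash device. All opcodes are serialized by flash_mutex.
+ */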
+static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
+		void *arg, struct cam_flash_private_soc *soc_private)
+{
+	int rc = 0;
+	int i = 0;
+	struct cam_control *cmd = (struct cam_control *)arg;
+
+	if (!fctrl || !arg) {
+		CAM_ERR(CAM_FLASH, "fctrl/arg is NULL with arg:%pK fctrl:%pK",
+			fctrl, arg);
+		return -EINVAL;
+	}
+
+	mutex_lock(&(fctrl->flash_mutex));
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev flash_acq_dev;
+		struct cam_create_dev_hdl bridge_params;
+
+		CAM_DBG(CAM_FLASH, "CAM_ACQUIRE_DEV");
+		if (fctrl->bridge_intf.device_hdl != -1) {
+			CAM_ERR(CAM_FLASH, "Device is already acquired");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		rc = copy_from_user(&flash_acq_dev, (void __user *)cmd->handle,
+			sizeof(flash_acq_dev));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		bridge_params.session_hdl = flash_acq_dev.session_handle;
+		bridge_params.ops = &fctrl->bridge_intf.ops;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = fctrl;
+
+		flash_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		fctrl->bridge_intf.device_hdl =
+			flash_acq_dev.device_handle;
+		fctrl->bridge_intf.session_hdl =
+			flash_acq_dev.session_handle;
+
+		rc = copy_to_user((void __user *) cmd->handle, &flash_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Copy to User with rc = %d",
+				rc);
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_RELEASE_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
+		if (fctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_FLASH,
+				"Invalid Handle: Link Hdl: %d device hdl: %d",
+				fctrl->bridge_intf.device_hdl,
+				fctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
+		if (rc)
+			CAM_ERR(CAM_FLASH,
+				"Failed in destroying the device Handle rc= %d",
+				rc);
+		fctrl->bridge_intf.device_hdl = -1;
+		fctrl->bridge_intf.link_hdl = -1;
+		fctrl->bridge_intf.session_hdl = -1;
+		break;
+	}
+	case CAM_QUERY_CAP: {
+		struct cam_flash_query_cap_info flash_cap = {0};
+
+		CAM_DBG(CAM_FLASH, "CAM_QUERY_CAP");
+		flash_cap.slot_info = fctrl->soc_info.index;
+		for (i = 0; i < fctrl->flash_num_sources; i++) {
+			flash_cap.max_current_flash[i] =
+				soc_private->flash_max_current[i];
+			flash_cap.max_duration_flash[i] =
+				soc_private->flash_max_duration[i];
+		}
+
+		for (i = 0; i < fctrl->torch_num_sources; i++)
+			flash_cap.max_current_torch[i] =
+				soc_private->torch_max_current[i];
+
+		if (copy_to_user((void __user *) cmd->handle, &flash_cap,
+			sizeof(struct cam_flash_query_cap_info))) {
+			CAM_ERR(CAM_FLASH, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_START_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_START_DEV");
+		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_INIT);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Enable Regulator Failed rc = %d", rc);
+			goto release_mutex;
+		}
+		fctrl->flash_state = CAM_FLASH_STATE_INIT;
+		rc = cam_flash_apply_setting(fctrl, 0);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_STOP_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_STOP_DEV");
+		if (fctrl->flash_state != CAM_FLASH_STATE_INIT)
+			cam_flash_off(fctrl);
+
+		rc = cam_flash_prepare(fctrl, CAM_FLASH_STATE_RELEASE);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Disable Regulator Failed ret = %d",
+				rc);
+			goto release_mutex;
+		}
+		fctrl->flash_state = CAM_FLASH_STATE_RELEASE;
+
+		break;
+	}
+	case CAM_CONFIG_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV");
+		rc = cam_flash_parser(fctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d", rc);
+			goto release_mutex;
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Invalid Opcode: %d", cmd->op_code);
+		rc = -EINVAL;
+	}
+
+release_mutex:
+	mutex_unlock(&(fctrl->flash_mutex));
+	return rc;
+}
+
+static const struct of_device_id cam_flash_dt_match[] = {
+	{.compatible = "qcom,camera-flash", .data = NULL},
+	{}
+};
+
+static long cam_flash_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+	struct cam_flash_private_soc *soc_private = NULL;
+
+	CAM_DBG(CAM_FLASH, "Enter");
+
+	fctrl = v4l2_get_subdevdata(sd);
+	soc_private = fctrl->soc_info.soc_private;
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL: {
+		rc = cam_flash_driver_cmd(fctrl, arg,
+			soc_private);
+		break;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Invalid ioctl cmd type");
+		rc = -EINVAL;
+		break;
+	}
+
+	CAM_DBG(CAM_FLASH, "Exit");
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_flash_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_FLASH,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL: {
+		rc = cam_flash_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "cam_flash_ioctl failed");
+		break;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Invalid compat ioctl cmd_type:%d",
+			cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_FLASH,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+static int cam_flash_platform_remove(struct platform_device *pdev)
+{
+	struct cam_flash_ctrl *fctrl;
+
+	fctrl = platform_get_drvdata(pdev);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Flash device is NULL");
+		return 0;
+	}
+
+	kfree(fctrl);
+
+	return 0;
+}
+
+static struct v4l2_subdev_core_ops cam_flash_subdev_core_ops = {
+	.ioctl = cam_flash_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_flash_subdev_do_ioctl
+#endif
+};
+
+static struct v4l2_subdev_ops cam_flash_subdev_ops = {
+	.core = &cam_flash_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cam_flash_internal_ops;
+
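+/* Allocate the control structure, parse DT and register the flash v4l2
+ * subdev along with its CRM bridge callbacks.
+ */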
+static int32_t cam_flash_platform_probe(struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	struct cam_flash_ctrl *flash_ctrl = NULL;
+
+	CAM_DBG(CAM_FLASH, "Enter");
+	if (!pdev->dev.of_node) {
+		CAM_ERR(CAM_FLASH, "of_node NULL");
+		return -EINVAL;
+	}
+
+	flash_ctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
+	if (!flash_ctrl)
+		return -ENOMEM;
+
+	flash_ctrl->pdev = pdev;
+	flash_ctrl->soc_info.pdev = pdev;
+
+	rc = cam_flash_get_dt_data(flash_ctrl, &flash_ctrl->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc);
+		kfree(flash_ctrl);
+		return -EINVAL;
+	}
+
+	flash_ctrl->v4l2_dev_str.internal_ops =
+		&cam_flash_internal_ops;
+	flash_ctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
+	flash_ctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
+	flash_ctrl->v4l2_dev_str.sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	flash_ctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
+	flash_ctrl->v4l2_dev_str.token = flash_ctrl;
+
+	rc = cam_register_subdev(&(flash_ctrl->v4l2_dev_str));
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
+		goto free_resource;
+	}
+	flash_ctrl->bridge_intf.device_hdl = -1;
+	flash_ctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+	flash_ctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+	flash_ctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+	flash_ctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+
+	platform_set_drvdata(pdev, flash_ctrl);
+	v4l2_set_subdevdata(&flash_ctrl->v4l2_dev_str.sd, flash_ctrl);
+
+	mutex_init(&(flash_ctrl->flash_mutex));
+	mutex_init(&(flash_ctrl->flash_wq_mutex));
+
+	CAM_DBG(CAM_FLASH, "Probe success");
+	return rc;
+free_resource:
+	kfree(flash_ctrl);
+	return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_flash_dt_match);
+
+static struct platform_driver cam_flash_platform_driver = {
+	.probe = cam_flash_platform_probe,
+	.remove = cam_flash_platform_remove,
+	.driver = {
+		.name = "CAM-FLASH-DRIVER",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_flash_dt_match,
+	},
+};
+
+static int __init cam_flash_init_module(void)
+{
+	int32_t rc = 0;
+
+	rc = platform_driver_register(&cam_flash_platform_driver);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "platform probe for flash failed");
+
+	return rc;
+}
+
+static void __exit cam_flash_exit_module(void)
+{
+	platform_driver_unregister(&cam_flash_platform_driver);
+}
+
+module_init(cam_flash_init_module);
+module_exit(cam_flash_exit_module);
+MODULE_DESCRIPTION("CAM FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
new file mode 100644
index 0000000..1897eb6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -0,0 +1,181 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _CAM_FLASH_DEV_H_
+#define _CAM_FLASH_DEV_H_
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/cam_sensor.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_subdev.h"
+#include "cam_mem_mgr.h"
+#include "cam_sensor_cmn_header.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define CAMX_FLASH_DEV_NAME "cam-flash-dev"
+
+#define CAM_FLASH_PIPELINE_DELAY 1
+
+#define CAM_FLASH_PACKET_OPCODE_INIT                 0
+#define CAM_FLASH_PACKET_OPCODE_SET_OPS              1
+#define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2
+
+enum cam_flash_switch_trigger_ops {
+	LED_SWITCH_OFF = 0,
+	LED_SWITCH_ON,
+};
+
+enum cam_flash_state {
+	CAM_FLASH_STATE_INIT,
+	CAM_FLASH_STATE_LOW,
+	CAM_FLASH_STATE_HIGH,
+	CAM_FLASH_STATE_RELEASE,
+};
+
+/**
+ * struct cam_flash_intf_params
+ * @device_hdl   : Device Handle
+ * @session_hdl  : Session Handle
+ * @link_hdl     : Link Handle
+ * @ops          : KMD operations
+ * @crm_cb       : Callback API pointers
+ */
+struct cam_flash_intf_params {
+	int32_t                     device_hdl;
+	int32_t                     session_hdl;
+	int32_t                     link_hdl;
+	struct cam_req_mgr_kmd_ops  ops;
+	struct cam_req_mgr_crm_cb  *crm_cb;
+};
+
+/**
+ * struct cam_flash_common_attr
+ * @is_settings_valid  : Notify the valid settings
+ * @request_id         : Request id provided by umd
+ * @count              : Number of LEDs
+ * @cmd_type           : Command buffer type
+ */
+struct cam_flash_common_attr {
+	bool     is_settings_valid;
+	int32_t  request_id;
+	uint16_t count;
+	uint8_t  cmd_type;
+};
+
+/**
+ * struct cam_flash_init_packet
+ * @cmn_attr   : Provides common attributes
+ * @flash_type : Flash type (PMIC/I2C/GPIO)
+ */
+struct cam_flash_init_packet {
+	struct cam_flash_common_attr  cmn_attr;
+	uint8_t                       flash_type;
+};
+
+/**
+ * struct cam_flash_frame_setting
+ * @cmn_attr         : Provides common attributes
+ * @num_iterations   : Iterations used to perform RER
+ * @led_on_delay_ms  : LED on time in milliseconds
+ * @led_off_delay_ms : LED off time in milliseconds
+ * @opcode           : Command buffer opcode
+ * @led_current_ma[] : LED current array in milliamps
+ */
+struct cam_flash_frame_setting {
+	struct cam_flash_common_attr cmn_attr;
+	uint16_t                     num_iterations;
+	uint16_t                     led_on_delay_ms;
+	uint16_t                     led_off_delay_ms;
+	int8_t                       opcode;
+	uint32_t                     led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+/**
+ *  struct cam_flash_private_soc
+ * @switch_trigger_name : Switch trigger name
+ * @flash_trigger_name  : Flash trigger name array
+ * @flash_op_current    : Flash operational current
+ * @flash_max_current   : Max supported current for LED in flash mode
+ * @flash_max_duration  : Max turn on duration for LED in Flash mode
+ * @torch_trigger_name  : Torch trigger name array
+ * @torch_op_current    : Torch operational current
+ * @torch_max_current   : Max supported current for LED in torch mode
+ */
+struct cam_flash_private_soc {
+	const char   *switch_trigger_name;
+	const char   *flash_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_max_duration[CAM_FLASH_MAX_LED_TRIGGERS];
+	const char   *torch_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     torch_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+/**
+ *  struct cam_flash_ctrl
+ * @soc_info            : Soc related information
+ * @pdev                : Platform device
+ * @per_frame[]         : Per_frame setting array
+ * @nrt_info            : NonRealTime settings
+ * @of_node             : Of Node ptr
+ * @v4l2_dev_str        : V4L2 device structure
+ * @bridge_intf         : CRM interface
+ * @flash_init_setting  : Init command buffer structure
+ * @switch_trigger      : Switch trigger ptr
+ * @flash_num_sources   : Number of flash sources
+ * @torch_num_sources   : Number of torch sources
+ * @flash_mutex         : Mutex for flash operations
+ * @flash_wq_mutex      : Mutex for flash apply setting
+ * @flash_state         : Current flash state (INIT/LOW/HIGH/RELEASE)
+ * @flash_type          : Flash types (PMIC/I2C/GPIO)
+ * @is_regulator_enabled : Regulator enable status flag
+ * @flash_trigger       : Flash trigger ptr
+ * @torch_trigger       : Torch trigger ptr
+ */
+struct cam_flash_ctrl {
+	struct cam_hw_soc_info          soc_info;
+	struct platform_device         *pdev;
+	struct cam_flash_frame_setting  per_frame[MAX_PER_FRAME_ARRAY];
+	struct cam_flash_frame_setting  nrt_info;
+	struct device_node             *of_node;
+	struct cam_subdev               v4l2_dev_str;
+	struct cam_flash_intf_params    bridge_intf;
+	struct cam_flash_init_packet    flash_init_setting;
+	struct led_trigger             *switch_trigger;
+	uint32_t                        flash_num_sources;
+	uint32_t                        torch_num_sources;
+	struct mutex                    flash_mutex;
+	struct mutex                    flash_wq_mutex;
+	enum   cam_flash_state          flash_state;
+	uint8_t                         flash_type;
+	bool                            is_regulator_enabled;
+	struct led_trigger           *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+	struct led_trigger           *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+#endif /*_CAM_FLASH_DEV_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
new file mode 100644
index 0000000..a9ab169
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -0,0 +1,224 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include "cam_flash_soc.h"
+
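+/*
+ * Parse the "switch-source", "flash-source" and "torch-source" phandles,
+ * read the trigger names and current limits from each source node and
+ * register the corresponding LED triggers.
+ */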
+static int32_t cam_get_source_node_info(
+	struct device_node *of_node,
+	struct cam_flash_ctrl *fctrl,
+	struct cam_flash_private_soc *soc_private)
+{
+	int32_t rc = 0;
+	uint32_t count = 0, i = 0;
+	struct device_node *flash_src_node = NULL;
+	struct device_node *torch_src_node = NULL;
+	struct device_node *switch_src_node = NULL;
+
+	switch_src_node = of_parse_phandle(of_node, "switch-source", 0);
+	if (!switch_src_node) {
+		CAM_DBG(CAM_FLASH, "switch_src_node NULL");
+	} else {
+		rc = of_property_read_string(switch_src_node,
+			"qcom,default-led-trigger",
+			&soc_private->switch_trigger_name);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"default-led-trigger read failed rc=%d", rc);
+		} else {
+			CAM_DBG(CAM_FLASH, "switch trigger %s",
+				soc_private->switch_trigger_name);
+			led_trigger_register_simple(
+				soc_private->switch_trigger_name,
+				&fctrl->switch_trigger);
+		}
+
+		of_node_put(switch_src_node);
+	}
+
+	if (of_get_property(of_node, "flash-source", &count)) {
+		count /= sizeof(uint32_t);
+
+		if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+			CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count);
+			return -EINVAL;
+		}
+
+		fctrl->flash_num_sources = count;
+
+		for (i = 0; i < count; i++) {
+			flash_src_node = of_parse_phandle(of_node,
+				"flash-source", i);
+			if (!flash_src_node) {
+				CAM_WARN(CAM_FLASH, "flash_src_node NULL");
+				continue;
+			}
+
+			rc = of_property_read_string(flash_src_node,
+				"qcom,default-led-trigger",
+				&soc_private->flash_trigger_name[i]);
+			if (rc) {
+				CAM_WARN(CAM_FLASH,
+				"default-led-trigger read failed rc=%d", rc);
+				of_node_put(flash_src_node);
+				continue;
+			}
+
+			CAM_DBG(CAM_FLASH, "default trigger %s",
+				soc_private->flash_trigger_name[i]);
+
+			/* Read operational-current */
+			rc = of_property_read_u32(flash_src_node,
+				"qcom,current-ma",
+				&soc_private->flash_op_current[i]);
+			if (rc) {
+				CAM_WARN(CAM_FLASH, "op-current: read failed");
+				of_node_put(flash_src_node);
+				continue;
+			}
+
+			/* Read max-current */
+			rc = of_property_read_u32(flash_src_node,
+				"qcom,max-current",
+				&soc_private->flash_max_current[i]);
+			if (rc) {
+				CAM_WARN(CAM_FLASH,
+					"max-current: read failed");
+				of_node_put(flash_src_node);
+				continue;
+			}
+
+			/* Read max-duration */
+			rc = of_property_read_u32(flash_src_node,
+				"qcom,duration-ms",
+				&soc_private->flash_max_duration[i]);
+			if (rc)
+				CAM_WARN(CAM_FLASH,
+					"max-duration: read failed");
+
+			of_node_put(flash_src_node);
+
+			CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
+				i, soc_private->flash_max_current[i]);
+
+			led_trigger_register_simple(
+				soc_private->flash_trigger_name[i],
+				&fctrl->flash_trigger[i]);
+		}
+	}
+
+	if (of_get_property(of_node, "torch-source", &count)) {
+		count /= sizeof(uint32_t);
+		if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+			CAM_ERR(CAM_FLASH, "Invalid LED count : %d", count);
+			return -EINVAL;
+		}
+
+		fctrl->torch_num_sources = count;
+
+		CAM_DBG(CAM_FLASH, "torch_num_sources = %d",
+			fctrl->torch_num_sources);
+		for (i = 0; i < count; i++) {
+			torch_src_node = of_parse_phandle(of_node,
+				"torch-source", i);
+			if (!torch_src_node) {
+				CAM_WARN(CAM_FLASH, "torch_src_node NULL");
+				continue;
+			}
+
+			rc = of_property_read_string(torch_src_node,
+				"qcom,default-led-trigger",
+				&soc_private->torch_trigger_name[i]);
+			if (rc < 0) {
+				CAM_WARN(CAM_FLASH,
+					"default-trigger read failed");
+				of_node_put(torch_src_node);
+				continue;
+			}
+
+			/* Read operational-current */
+			rc = of_property_read_u32(torch_src_node,
+				"qcom,current-ma",
+				&soc_private->torch_op_current[i]);
+			if (rc < 0) {
+				CAM_WARN(CAM_FLASH, "current: read failed");
+				of_node_put(torch_src_node);
+				continue;
+			}
+
+			/* Read max-current */
+			rc = of_property_read_u32(torch_src_node,
+				"qcom,max-current",
+				&soc_private->torch_max_current[i]);
+			if (rc < 0) {
+				CAM_WARN(CAM_FLASH,
+					"max-current: read failed");
+				of_node_put(torch_src_node);
+				continue;
+			}
+
+			of_node_put(torch_src_node);
+
+			CAM_DBG(CAM_FLASH, "max_current[%d]: %d",
+				i, soc_private->torch_max_current[i]);
+
+			led_trigger_register_simple(
+				soc_private->torch_trigger_name[i],
+				&fctrl->torch_trigger[i]);
+		}
+	}
+
+	return rc;
+}
+
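+/* Read generic SOC properties and the PMIC flash/torch source info from DT */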
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+	struct cam_hw_soc_info *soc_info)
+{
+	int32_t rc = 0;
+	struct device_node *of_node = NULL;
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "NULL flash control structure");
+		return -EINVAL;
+	}
+
+	of_node = fctrl->pdev->dev.of_node;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
+		return rc;
+	}
+
+	soc_info->soc_private =
+		kzalloc(sizeof(struct cam_flash_private_soc), GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_soc_res;
+	}
+
+	rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private);
+	if (rc < 0) {
+		CAM_ERR(CAM_FLASH,
+			"cam_get_source_node_info failed rc %d", rc);
+		goto free_soc_private;
+	}
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+release_soc_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h
new file mode 100644
index 0000000..2e1da69
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_FLASH_SOC_H_
+#define _CAM_FLASH_SOC_H_
+
+#include "cam_flash_dev.h"
+
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+	struct cam_hw_soc_info *soc_info);
+
+#endif /*_CAM_FLASH_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index be8306c..f6e6a9a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -35,8 +35,7 @@
 	ioctl_ctrl = (struct cam_control *)arg;
 
 	if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) {
-		pr_err("%s:%d :Error: Invalid Handle Type\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid Handle Type");
 		return -EINVAL;
 	}
 
@@ -49,22 +48,21 @@
 		(uint64_t *)&generic_ptr,
 		&len_of_buff);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: Failed in getting the buffer: %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "Failed in getting the buffer: %d", rc);
 		return rc;
 	}
 
 	csl_packet = (struct cam_packet *)(generic_ptr +
 		config.offset);
 	if (config.offset > len_of_buff) {
-		pr_err("%s: %d offset is out of bounds: off: %lld len: %zu\n",
-			__func__, __LINE__, config.offset, len_of_buff);
+		CAM_ERR(CAM_SENSOR,
+			"offset is out of bounds: off: %lld len: %zu",
+			 config.offset, len_of_buff);
 		return -EINVAL;
 	}
 
 	i2c_data = &(s_ctrl->i2c_data);
-	CDBG("%s:%d Header OpCode: %d\n",
-		__func__, __LINE__, csl_packet->header.op_code);
+	CAM_DBG(CAM_SENSOR, "Header OpCode: %d", csl_packet->header.op_code);
 	if ((csl_packet->header.op_code & 0xFFFFFF) ==
 		CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG) {
 		i2c_reg_settings = &i2c_data->init_settings;
@@ -76,16 +74,16 @@
 			&i2c_data->
 			per_frame[csl_packet->header.request_id %
 			MAX_PER_FRAME_ARRAY];
-		CDBG("%s:%d Received Packet: %lld\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "Received Packet: %lld",
 			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY);
 		if (i2c_reg_settings->is_settings_valid == 1) {
-			pr_err("%s:%d :Error: Already some pkt in offset req : %lld\n",
-				__func__, __LINE__,
+			CAM_ERR(CAM_SENSOR,
+				"Already some pkt in offset req : %lld",
 				csl_packet->header.request_id);
 			rc = delete_request(i2c_reg_settings);
 			if (rc < 0) {
-				pr_err("%s: %d :Error: Failed in Deleting the err: %d\n",
-					__func__, __LINE__, rc);
+				CAM_ERR(CAM_SENSOR,
+					"Failed in Deleting the err: %d", rc);
 				return rc;
 			}
 		}
@@ -97,7 +95,7 @@
 		CAM_PKT_NOP_OPCODE) {
 		goto update_req_mgr;
 	} else {
-		pr_err("%s:%d Invalid Packet Header\n", __func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid Packet Header");
 		return -EINVAL;
 	}
 
@@ -107,8 +105,7 @@
 
 	rc = cam_sensor_i2c_pkt_parser(i2c_reg_settings, cmd_desc, 1);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: Fail parsing I2C Pkt: %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc);
 		return rc;
 	}
 
@@ -118,14 +115,14 @@
 		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE)) {
 		add_req.link_hdl = s_ctrl->bridge_intf.link_hdl;
 		add_req.req_id = csl_packet->header.request_id;
-		CDBG("%s:%d Rxed Req Id: %lld\n",
-			__func__, __LINE__, csl_packet->header.request_id);
+		CAM_DBG(CAM_SENSOR, "Rxed Req Id: %lld",
+			csl_packet->header.request_id);
 		add_req.dev_hdl = s_ctrl->bridge_intf.device_hdl;
 		if (s_ctrl->bridge_intf.crm_cb &&
 			s_ctrl->bridge_intf.crm_cb->add_req)
 			s_ctrl->bridge_intf.crm_cb->add_req(&add_req);
-		CDBG("%s:%d add req to req mgr: %lld\n",
-			__func__, __LINE__, add_req.req_id);
+		CAM_DBG(CAM_SENSOR, "add req to req mgr: %lld",
+			add_req.req_id);
 	}
 	return rc;
 }
@@ -139,7 +136,8 @@
 	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
 		cci_client = s_ctrl->io_master_info.cci_client;
 		if (!cci_client) {
-			pr_err("failed: cci_client %pK", cci_client);
+			CAM_ERR(CAM_SENSOR, "failed: cci_client %pK",
+				cci_client);
 			return -EINVAL;
 		}
 		cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
@@ -147,8 +145,7 @@
 		cci_client->retries = 3;
 		cci_client->id_map = 0;
 		cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
-		CDBG("%s:%d Master: %d sid: %d freq_mode: %d\n",
-			__func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "Master: %d sid: %d freq_mode: %d",
 			cci_client->cci_i2c_master, i2c_info->slave_addr,
 			i2c_info->i2c_freq_mode);
 	}
@@ -170,211 +167,14 @@
 
 	s_ctrl->sensor_probe_addr_type =  probe_info->addr_type;
 	s_ctrl->sensor_probe_data_type =  probe_info->data_type;
-	CDBG("%s:%d Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x\n",
-		__func__, __LINE__,
+	CAM_DBG(CAM_SENSOR,
+		"Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x",
 		s_ctrl->sensordata->slave_info.sensor_id_reg_addr,
 		s_ctrl->sensordata->slave_info.sensor_id,
 		s_ctrl->sensordata->slave_info.sensor_id_mask);
 	return rc;
 }
 
-int32_t cam_sensor_update_power_settings(void *cmd_buf,
-	int cmd_length, struct cam_sensor_ctrl_t *s_ctrl)
-{
-	int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
-	int32_t i = 0, pwr_up = 0, pwr_down = 0;
-	void *ptr = cmd_buf, *scr;
-	struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
-	struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
-	struct cam_sensor_power_ctrl_t *power_info =
-		&s_ctrl->sensordata->power_info;
-
-	if (!pwr_cmd || !cmd_length) {
-		pr_err("%s:%d Invalid Args: pwr_cmd %pK, cmd_length: %d\n",
-			__func__, __LINE__, pwr_cmd, cmd_length);
-		return -EINVAL;
-	}
-
-	power_info->power_setting_size = 0;
-	power_info->power_setting =
-		(struct cam_sensor_power_setting *)
-		kzalloc(sizeof(struct cam_sensor_power_setting) *
-			MAX_POWER_CONFIG, GFP_KERNEL);
-	if (!power_info->power_setting)
-		return -ENOMEM;
-
-	power_info->power_down_setting =
-		(struct cam_sensor_power_setting *)
-		kzalloc(sizeof(struct cam_sensor_power_setting) *
-			MAX_POWER_CONFIG, GFP_KERNEL);
-	if (!power_info->power_down_setting) {
-		rc = -ENOMEM;
-		goto free_power_settings;
-	}
-
-	while (tot_size < cmd_length) {
-		if (cmm_hdr->cmd_type ==
-			CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
-			struct cam_cmd_power *pwr_cmd =
-				(struct cam_cmd_power *)ptr;
-
-			power_info->
-				power_setting_size +=
-				pwr_cmd->count;
-			scr = ptr + sizeof(struct cam_cmd_power);
-			tot_size = tot_size + sizeof(struct cam_cmd_power);
-
-			if (pwr_cmd->count == 0)
-				CDBG("%s:%d Un expected Command\n",
-					__func__, __LINE__);
-
-			for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
-				power_info->
-					power_setting[pwr_up].seq_type =
-					pwr_cmd->power_settings[i].
-						power_seq_type;
-				power_info->
-					power_setting[pwr_up].config_val =
-					pwr_cmd->power_settings[i].
-						config_val_low;
-				power_info->power_setting[pwr_up].delay = 0;
-				if (i) {
-					scr = scr +
-						sizeof(
-						struct cam_power_settings);
-					tot_size = tot_size +
-						sizeof(
-						struct cam_power_settings);
-				}
-				if (tot_size > cmd_length) {
-					pr_err("%s:%d :Error: Command Buffer is wrong\n",
-						__func__, __LINE__);
-					rc = -EINVAL;
-					goto free_power_down_settings;
-				}
-				CDBG("Seq Type[%d]: %d Config_val: %ldn",
-					pwr_up,
-					power_info->
-						power_setting[pwr_up].seq_type,
-					power_info->
-						power_setting[pwr_up].
-						config_val);
-			}
-			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
-			ptr = (void *) scr;
-			cmm_hdr = (struct common_header *)ptr;
-		} else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
-			struct cam_cmd_unconditional_wait *wait_cmd =
-				(struct cam_cmd_unconditional_wait *)ptr;
-			if (wait_cmd->op_code ==
-				CAMERA_SENSOR_WAIT_OP_SW_UCND) {
-				if (last_cmd_type ==
-					CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
-					if (pwr_up > 0)
-						power_info->
-							power_setting
-							[pwr_up - 1].delay +=
-							wait_cmd->delay;
-					else
-						pr_err("%s:%d Delay is expected only after valid power up setting\n",
-							__func__, __LINE__);
-				} else if (last_cmd_type ==
-					CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
-					if (pwr_down > 0)
-						power_info->
-							power_down_setting
-							[pwr_down - 1].delay +=
-							wait_cmd->delay;
-					else
-						pr_err("%s:%d Delay is expected only after valid power down setting\n",
-							__func__, __LINE__);
-				}
-			} else
-				CDBG("%s:%d Invalid op code: %d\n",
-					__func__, __LINE__, wait_cmd->op_code);
-			tot_size = tot_size +
-				sizeof(struct cam_cmd_unconditional_wait);
-			if (tot_size > cmd_length) {
-				pr_err("Command Buffer is wrong\n");
-				return -EINVAL;
-			}
-			scr = (void *) (wait_cmd);
-			ptr = (void *)
-				(scr +
-				sizeof(struct cam_cmd_unconditional_wait));
-			CDBG("%s:%d ptr: %pK sizeof: %d Next: %pK\n",
-				__func__, __LINE__, scr,
-				(int32_t)sizeof(
-				struct cam_cmd_unconditional_wait), ptr);
-
-			cmm_hdr = (struct common_header *)ptr;
-		} else if (cmm_hdr->cmd_type ==
-			CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
-			struct cam_cmd_power *pwr_cmd =
-				(struct cam_cmd_power *)ptr;
-
-			scr = ptr + sizeof(struct cam_cmd_power);
-			tot_size = tot_size + sizeof(struct cam_cmd_power);
-			power_info->power_down_setting_size += pwr_cmd->count;
-
-			if (pwr_cmd->count == 0)
-				pr_err("%s:%d Invalid Command\n",
-					__func__, __LINE__);
-
-			for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
-				power_info->
-					power_down_setting[pwr_down].
-					seq_type =
-					pwr_cmd->power_settings[i].
-					power_seq_type;
-				power_info->
-					power_down_setting[pwr_down].
-					config_val =
-					pwr_cmd->power_settings[i].
-					config_val_low;
-				power_info->
-					power_down_setting[pwr_down].delay = 0;
-				if (i) {
-					scr = scr +
-						sizeof(
-						struct cam_power_settings);
-					tot_size =
-						tot_size +
-						sizeof(
-						struct cam_power_settings);
-				}
-				if (tot_size > cmd_length) {
-					pr_err("Command Buffer is wrong\n");
-					rc = -EINVAL;
-					goto free_power_down_settings;
-				}
-				CDBG("%s:%d Seq Type[%d]: %d Config_val: %ldn",
-					__func__, __LINE__,
-					pwr_down,
-					power_info->
-						power_down_setting[pwr_down].
-						seq_type,
-					power_info->
-						power_down_setting[pwr_down].
-						config_val);
-			}
-			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
-			ptr = (void *) scr;
-			cmm_hdr = (struct common_header *)ptr;
-		} else {
-			pr_err("%s:%d: :Error: Un expected Header Type: %d\n",
-				__func__, __LINE__, cmm_hdr->cmd_type);
-		}
-	}
-
-	return rc;
-free_power_down_settings:
-	kfree(power_info->power_down_setting);
-free_power_settings:
-	kfree(power_info->power_setting);
-	return rc;
-}
-
 int32_t cam_handle_cmd_buffers_for_probe(void *cmd_buf,
 	struct cam_sensor_ctrl_t *s_ctrl,
 	int32_t cmd_buf_num, int cmd_buf_length)
@@ -389,16 +189,14 @@
 		i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
 		rc = cam_sensor_update_i2c_info(i2c_info, s_ctrl);
 		if (rc < 0) {
-			pr_err("%s:%d Failed in Updating the i2c Info\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "Failed in Updating the i2c Info");
 			return rc;
 		}
 		probe_info = (struct cam_cmd_probe *)
 			(cmd_buf + sizeof(struct cam_cmd_i2c_info));
 		rc = cam_sensor_update_slave_info(probe_info, s_ctrl);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Updating the slave Info\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "Failed updating the slave Info");
 			return rc;
 		}
 		cmd_buf = probe_info;
@@ -406,16 +204,16 @@
 		break;
 	case 1: {
 		rc = cam_sensor_update_power_settings(cmd_buf,
-			cmd_buf_length, s_ctrl);
+			cmd_buf_length, &s_ctrl->sensordata->power_info);
 		if (rc < 0) {
-			pr_err("Failed in updating power settings\n");
+			CAM_ERR(CAM_SENSOR,
+				"Failed in updating power settings");
 			return rc;
 		}
 	}
 		break;
 	default:
-		pr_err("%s:%d Invalid command buffer\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid command buffer");
 		break;
 	}
 	return rc;
@@ -434,21 +232,19 @@
 	rc = cam_mem_get_cpu_buf(handle,
 		(uint64_t *)&packet, &len);
 	if (rc < 0) {
-		pr_err("%s: %d Failed to get the command Buffer\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Failed to get the command Buffer");
 		return -EINVAL;
 	}
 	pkt = (struct cam_packet *)packet;
 	cmd_desc = (struct cam_cmd_buf_desc *)
 		((uint32_t *)&pkt->payload + pkt->cmd_buf_offset/4);
 	if (cmd_desc == NULL) {
-		pr_err("%s: %d command descriptor pos is invalid\n",
-		__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "command descriptor pos is invalid");
 		return -EINVAL;
 	}
 	if (pkt->num_cmd_buf != 2) {
-		pr_err("%s: %d Expected More Command Buffers : %d\n",
-			__func__, __LINE__, pkt->num_cmd_buf);
+		CAM_ERR(CAM_SENSOR, "Expected More Command Buffers : %d",
+			 pkt->num_cmd_buf);
 		return -EINVAL;
 	}
 	for (i = 0; i < pkt->num_cmd_buf; i++) {
@@ -457,8 +253,8 @@
 		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
 			(uint64_t *)&cmd_buf1, &len);
 		if (rc < 0) {
-			pr_err("%s: %d Failed to parse the command Buffer Header\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR,
+				"Failed to parse the command Buffer Header");
 			return -EINVAL;
 		}
 		cmd_buf = (uint32_t *)cmd_buf1;
@@ -468,8 +264,8 @@
 		rc = cam_handle_cmd_buffers_for_probe(ptr, s_ctrl,
 			i, cmd_desc[i].length);
 		if (rc < 0) {
-			pr_err("%s: %d Failed to parse the command Buffer Header\n",
-			__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR,
+				"Failed to parse the command Buffer Header");
 			return -EINVAL;
 		}
 	}
@@ -525,8 +321,8 @@
 	slave_info = &(s_ctrl->sensordata->slave_info);
 
 	if (!slave_info) {
-		pr_err("%s:%d failed: %pK\n",
-			__func__, __LINE__, slave_info);
+		CAM_ERR(CAM_SENSOR, "failed: %pK", slave_info);
 		return -EINVAL;
 	}
 
@@ -536,11 +332,11 @@
 		&chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
 		CAMERA_SENSOR_I2C_TYPE_WORD);
 
-	CDBG("%s:%d read id: 0x%x expected id 0x%x:\n",
-			__func__, __LINE__, chipid, slave_info->sensor_id);
+	CAM_DBG(CAM_SENSOR, "read id: 0x%x expected id 0x%x:",
+			 chipid, slave_info->sensor_id);
 	if (cam_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) {
-		pr_err("%s: chip id %x does not match %x\n",
-				__func__, chipid, slave_info->sensor_id);
+		CAM_ERR(CAM_SENSOR, "chip id %x does not match %x",
+				chipid, slave_info->sensor_id);
 		return -ENODEV;
 	}
 	return rc;
@@ -557,8 +353,7 @@
 		&s_ctrl->sensordata->power_info;
 
 	if (!s_ctrl || !arg) {
-		pr_err("%s: %d s_ctrl is NULL\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "s_ctrl is NULL");
 		return -EINVAL;
 	}
 
@@ -566,7 +361,8 @@
 	switch (cmd->op_code) {
 	case CAM_SENSOR_PROBE_CMD: {
 		if (s_ctrl->is_probe_succeed == 1) {
-			pr_err("Already Sensor Probed in the slot\n");
+			CAM_ERR(CAM_SENSOR,
+				"Already Sensor Probed in the slot");
 			break;
 		}
 		/* Allocate memory for power up setting */
@@ -592,15 +388,14 @@
 			CAM_HANDLE_MEM_HANDLE) {
 			rc = cam_handle_mem_ptr(cmd->handle, s_ctrl);
 			if (rc < 0) {
-				pr_err("%s: %d Get Buffer Handle Failed\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_SENSOR, "Get Buffer Handle Failed");
 				kfree(pu);
 				kfree(pd);
 				goto release_mutex;
 			}
 		} else {
-			pr_err("%s:%d :Error: Invalid Command Type: %d",
-				__func__, __LINE__, cmd->handle_type);
+			CAM_ERR(CAM_SENSOR, "Invalid Command Type: %d",
+				 cmd->handle_type);
 		}
 
 		/* Parse and fill vreg params for powerup settings */
@@ -609,8 +404,9 @@
 			s_ctrl->sensordata->power_info.power_setting,
 			s_ctrl->sensordata->power_info.power_setting_size);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Fail in filling vreg params for PUP rc %d",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR,
+				"Failed in filling vreg params for PUP rc %d",
+				rc);
 			kfree(pu);
 			kfree(pd);
 			goto release_mutex;
@@ -622,8 +418,9 @@
 			s_ctrl->sensordata->power_info.power_down_setting,
 			s_ctrl->sensordata->power_info.power_down_setting_size);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Fail in filling vreg params for PDOWN rc %d",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR,
+				"Failed in filling vreg params for PDOWN rc %d",
+				rc);
 			kfree(pu);
 			kfree(pd);
 			goto release_mutex;
@@ -632,7 +429,7 @@
 		/* Power up and probe sensor */
 		rc = cam_sensor_power_up(s_ctrl);
 		if (rc < 0) {
-			pr_err("power up failed");
+			CAM_ERR(CAM_SENSOR, "power up failed");
 			cam_sensor_power_down(s_ctrl);
 			kfree(pu);
 			kfree(pd);
@@ -649,13 +446,11 @@
 			goto release_mutex;
 		}
 
-		CDBG("%s:%d Probe Succeeded on the slot: %d\n",
-			__func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "Probe Succeeded on the slot: %d",
 			s_ctrl->soc_info.index);
 		rc = cam_sensor_power_down(s_ctrl);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: fail in Sensor Power Down\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "fail in Sensor Power Down");
 			kfree(pu);
 			kfree(pd);
 			goto release_mutex;
@@ -672,15 +467,14 @@
 		struct cam_create_dev_hdl bridge_params;
 
 		if (s_ctrl->bridge_intf.device_hdl != -1) {
-			pr_err("%s:%d Device is already acquired\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "Device is already acquired");
 			rc = -EINVAL;
 			goto release_mutex;
 		}
 		rc = copy_from_user(&sensor_acq_dev,
 			(void __user *) cmd->handle, sizeof(sensor_acq_dev));
 		if (rc < 0) {
-			pr_err("Failed Copying from user\n");
+			CAM_ERR(CAM_SENSOR, "Failed Copying from user");
 			goto release_mutex;
 		}
 
@@ -695,11 +489,11 @@
 		s_ctrl->bridge_intf.device_hdl = sensor_acq_dev.device_handle;
 		s_ctrl->bridge_intf.session_hdl = sensor_acq_dev.session_handle;
 
-		CDBG("%s:%d Device Handle: %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "Device Handle: %d",
 			sensor_acq_dev.device_handle);
 		if (copy_to_user((void __user *) cmd->handle, &sensor_acq_dev,
 			sizeof(struct cam_sensor_acquire_dev))) {
-			pr_err("Failed Copy to User\n");
+			CAM_ERR(CAM_SENSOR, "Failed Copy to User");
 			rc = -EFAULT;
 			goto release_mutex;
 		}
@@ -707,8 +501,8 @@
 		break;
 	case CAM_RELEASE_DEV: {
 		if (s_ctrl->bridge_intf.device_hdl == -1) {
-			pr_err("%s:%d Invalid Handles: link hdl: %d device hdl: %d\n",
-				__func__, __LINE__,
+			CAM_ERR(CAM_SENSOR,
+				"Invalid Handles: link hdl: %d device hdl: %d",
 				s_ctrl->bridge_intf.device_hdl,
 				s_ctrl->bridge_intf.link_hdl);
 			rc = -EINVAL;
@@ -716,8 +510,8 @@
 		}
 		rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
 		if (rc < 0)
-			pr_err("%s:%d Failed in destroying the device hdl\n",
-			__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR,
+				"failed in destroying the device hdl");
 		s_ctrl->bridge_intf.device_hdl = -1;
 		s_ctrl->bridge_intf.link_hdl = -1;
 		s_ctrl->bridge_intf.session_hdl = -1;
@@ -729,7 +523,7 @@
 		cam_sensor_query_cap(s_ctrl, &sensor_cap);
 		if (copy_to_user((void __user *) cmd->handle, &sensor_cap,
 			sizeof(struct  cam_sensor_query_cap))) {
-			pr_err("Failed Copy to User\n");
+			CAM_ERR(CAM_SENSOR, "Failed Copy to User");
 			rc = -EFAULT;
 			goto release_mutex;
 		}
@@ -738,19 +532,18 @@
 	case CAM_START_DEV: {
 		rc = cam_sensor_power_up(s_ctrl);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Sensor Power up failed\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
 			goto release_mutex;
 		}
 		rc = cam_sensor_apply_settings(s_ctrl, 0);
 		if (rc < 0) {
-			pr_err("cannot apply settings\n");
+			CAM_ERR(CAM_SENSOR, "cannot apply settings");
 			goto release_mutex;
 		}
 		rc = delete_request(&s_ctrl->i2c_data.init_settings);
 		if (rc < 0) {
-			pr_err("%s:%d Fail in deleting the Init settings\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR,
+				"Fail in deleting the Init settings");
 			rc = -EINVAL;
 			goto release_mutex;
 		}
@@ -759,8 +552,7 @@
 	case CAM_STOP_DEV: {
 		rc = cam_sensor_power_down(s_ctrl);
 		if (rc < 0) {
-			pr_err("%s:%d Sensor Power Down failed\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
 			goto release_mutex;
 		}
 	}
@@ -768,8 +560,7 @@
 	case CAM_CONFIG_DEV: {
 		rc = cam_sensor_i2c_pkt_parse(s_ctrl, arg);
 		if (rc < 0) {
-			pr_err("%s:%d :Error: Failed CCI Config: %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "Failed CCI Config: %d", rc);
 			goto release_mutex;
 		}
 	}
@@ -777,8 +568,7 @@
 	case CAM_SD_SHUTDOWN:
 		break;
 	default:
-		pr_err("%s:%d :Error: Invalid Opcode: %d\n",
-			__func__, __LINE__, cmd->op_code);
+		CAM_ERR(CAM_SENSOR, "Invalid Opcode: %d", cmd->op_code);
 		rc = -EINVAL;
 		goto release_mutex;
 	}
@@ -812,7 +602,7 @@
 	s_ctrl = (struct cam_sensor_ctrl_t *)
 		cam_get_device_priv(link->dev_hdl);
 	if (!s_ctrl) {
-		pr_err("%s: Device data is NULL\n", __func__);
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
 		return -EINVAL;
 	}
 	if (link->link_enable) {
@@ -849,8 +639,7 @@
 		&s_ctrl->soc_info;
 
 	if (!s_ctrl) {
-		pr_err("%s:%d failed: %pK\n",
-			__func__, __LINE__, s_ctrl);
+		CAM_ERR(CAM_SENSOR, "failed: %pK", s_ctrl);
 		return -EINVAL;
 	}
 
@@ -858,23 +647,20 @@
 	slave_info = &(s_ctrl->sensordata->slave_info);
 
 	if (!power_info || !slave_info) {
-		pr_err("%s:%d failed: %pK %pK\n",
-			__func__, __LINE__, power_info,
-			slave_info);
+		CAM_ERR(CAM_SENSOR, "failed: %pK %pK", power_info, slave_info);
 		return -EINVAL;
 	}
 
 	rc = cam_sensor_core_power_up(power_info, soc_info);
 	if (rc < 0) {
-		pr_err("%s:%d power up the core is failed:%d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "power up the core is failed:%d", rc);
 		return rc;
 	}
 
 	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
 		rc = camera_io_init(&(s_ctrl->io_master_info));
 		if (rc < 0) {
-			pr_err("%s cci_init failed\n", __func__);
+			CAM_ERR(CAM_SENSOR, "cci_init failed");
 			return -EINVAL;
 		}
 	}
@@ -891,8 +677,7 @@
 	int rc = 0;
 
 	if (!s_ctrl) {
-		pr_err("%s:%d failed: s_ctrl %pK\n",
-			__func__, __LINE__, s_ctrl);
+		CAM_ERR(CAM_SENSOR, "failed: s_ctrl %pK", s_ctrl);
 		return -EINVAL;
 	}
 
@@ -900,14 +685,12 @@
 	soc_info = &s_ctrl->soc_info;
 
 	if (!power_info) {
-		pr_err("%s:%d failed: power_info %pK\n",
-			__func__, __LINE__, power_info);
+		CAM_ERR(CAM_SENSOR, "failed: power_info %pK", power_info);
 		return -EINVAL;
 	}
 	rc = msm_camera_power_down(power_info, soc_info);
 	if (rc < 0) {
-		pr_err("%s:%d power down the core is failed:%d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "power down the core is failed:%d", rc);
 		return rc;
 	}
 
@@ -935,15 +718,16 @@
 					&(s_ctrl->io_master_info),
 					&(i2c_list->i2c_settings));
 				if (rc < 0) {
-					pr_err("Failed to write the I2C settings\n");
+					CAM_ERR(CAM_SENSOR,
+					"Failed to write the I2C settings");
 					return rc;
 				}
 			}
 			rc = delete_request(&(s_ctrl->i2c_data.init_settings));
 			i2c_set->is_settings_valid = 0;
 			if (rc < 0) {
-				pr_err("%s:%d :Error: Failed in deleting the Init request: %d\n",
-					__func__, __LINE__, rc);
+				CAM_ERR(CAM_SENSOR,
+				"Failed in deleting the Init request: %d", rc);
 			}
 		}
 	} else {
@@ -957,8 +741,9 @@
 					&(s_ctrl->io_master_info),
 					&(i2c_list->i2c_settings));
 				if (rc < 0) {
-					pr_err("%s:%d :Error: Fail to write the I2C settings: %d\n",
-						__func__, __LINE__, rc);
+					CAM_ERR(CAM_SENSOR,
+					"Fail to write the I2C settings: %d",
+						 rc);
 					return rc;
 				}
 			}
@@ -966,8 +751,8 @@
 				MAX_PER_FRAME_ARRAY -
 				MAX_SYSTEM_PIPELINE_DELAY) %
 				MAX_PER_FRAME_ARRAY;
-			CDBG("%s:%d Deleting the Request: %d\n",
-				__func__, __LINE__,	del_req_id);
+			CAM_DBG(CAM_SENSOR, "Deleting the Request: %d",
+				del_req_id);
 			if (req_id >
 				s_ctrl->i2c_data.per_frame[del_req_id].
 				request_id) {
@@ -977,13 +762,13 @@
 					&(s_ctrl->i2c_data.
 					per_frame[del_req_id]));
 				if (rc < 0)
-					pr_err("%s:%d :Error: Failed in deleting the request: %d rc: %d\n",
-						__func__, __LINE__,
+					CAM_ERR(CAM_SENSOR,
+						"Delete request Fail:%d rc:%d",
 						del_req_id, rc);
 			}
 		} else {
-			CDBG("%s:%d Invalid/NOP request to apply: %lld\n",
-				__func__, __LINE__, req_id);
+			CAM_DBG(CAM_SENSOR,
+				"Invalid/NOP request to apply: %lld", req_id);
 		}
 	}
 	return rc;
@@ -1000,14 +785,11 @@
 	s_ctrl = (struct cam_sensor_ctrl_t *)
 		cam_get_device_priv(apply->dev_hdl);
 	if (!s_ctrl) {
-		pr_err("%s: Device data is NULL\n", __func__);
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
 		return -EINVAL;
 	}
-	CDBG("%s:%d Req Id: %lld\n", __func__, __LINE__,
-		apply->request_id);
-
+	CAM_DBG(CAM_SENSOR, "Req Id: %lld", apply->request_id);
 	trace_cam_apply_req("Sensor", apply);
-
 	rc = cam_sensor_apply_settings(s_ctrl, apply->request_id);
 	return rc;
 }
@@ -1025,7 +807,7 @@
 	s_ctrl = (struct cam_sensor_ctrl_t *)
 		cam_get_device_priv(flush_req->dev_hdl);
 	if (!s_ctrl) {
-		pr_err("%s: Device data is NULL\n", __func__);
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
 		return -EINVAL;
 	}
 
@@ -1039,8 +821,8 @@
 		if (i2c_set->is_settings_valid == 1) {
 			rc = delete_request(i2c_set);
 			if (rc < 0)
-				pr_err("%s:%d :Error: delete request: %lld rc: %d\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_SENSOR,
+					"delete request: %lld rc: %d",
 					i2c_set->request_id, rc);
 
 			if (flush_req->type ==
@@ -1053,7 +835,8 @@
 
 	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
 		!cancel_req_id_found)
-		CDBG("%s:Flush request id:%lld not found in the pending list\n",
-			__func__, flush_req->req_id);
+		CAM_DBG(CAM_SENSOR,
+			"Flush request id:%lld not found in the pending list",
+			flush_req->req_id);
 	return rc;
 }
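The hunks above are one mechanical conversion; a minimal sketch of the before/after pattern (placeholder function and variables, assuming the CAM_ERR/CAM_DBG macros in cam_debug_util.h supply the module tag plus the function and line context that used to be passed by hand):

/* Illustrative sketch only, not part of the patch. rc and slot are
 * placeholder variables standing in for the real driver state.
 */
static void log_style_sketch(int rc, int slot)
{
	/* before: context carried explicitly in every call site */
	pr_err("%s:%d power up failed rc %d\n", __func__, __LINE__, rc);

	/* after: module-tagged wrappers; __func__/__LINE__ are assumed to
	 * be added inside the macros, so they are dropped from the format
	 */
	CAM_ERR(CAM_SENSOR, "power up failed rc %d", rc);
	CAM_DBG(CAM_SENSOR, "Probe Succeeded on the slot: %d", slot);
}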
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
index c06a1b3..1453fb3 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -27,8 +27,7 @@
 		rc = cam_sensor_driver_cmd(s_ctrl, arg);
 		break;
 	default:
-		pr_err("%s:%d Invalid ioctl cmd: %d\n",
-			__func__, __LINE__, cmd);
+		CAM_ERR(CAM_SENSOR, "Invalid ioctl cmd: %d", cmd);
 		rc = -EINVAL;
 		break;
 	}
@@ -42,8 +41,8 @@
 	struct cam_sensor_ctrl_t *s_ctrl;
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		pr_err("%s %s :Error: i2c_check_functionality failed\n",
-			__func__, client->name);
+		CAM_ERR(CAM_SENSOR,
+			"%s :i2c_check_functionality failed", client->name);
 		return -EFAULT;
 	}
 
@@ -60,8 +59,7 @@
 
 	rc = cam_sensor_parse_dt(s_ctrl);
 	if (rc < 0) {
-		pr_err("%s:%d :Error: cam_sensor_parse_dt rc %d",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "cam_sensor_parse_dt rc %d", rc);
 		goto free_s_ctrl;
 	}
 
@@ -77,7 +75,7 @@
 
 	s_ctrl = platform_get_drvdata(pdev);
 	if (!s_ctrl) {
-		pr_err("%s: sensor device is NULL\n", __func__);
+		CAM_ERR(CAM_SENSOR, "sensor device is NULL");
 		return 0;
 	}
 
@@ -92,7 +90,7 @@
 	struct cam_sensor_ctrl_t  *s_ctrl = i2c_get_clientdata(client);
 
 	if (!s_ctrl) {
-		pr_err("%s: sensor device is NULL\n", __func__);
+		CAM_ERR(CAM_SENSOR, "sensor device is NULL");
 		return 0;
 	}
 
@@ -111,7 +109,7 @@
 
 	if (copy_from_user(&cmd_data, (void __user *)arg,
 		sizeof(cmd_data))) {
-		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+		CAM_ERR(CAM_SENSOR, "Failed to copy from user_ptr=%pK size=%zu",
 			(void __user *)arg, sizeof(cmd_data));
 		return -EFAULT;
 	}
@@ -120,19 +118,18 @@
 	case VIDIOC_CAM_CONTROL:
 		rc = cam_sensor_subdev_ioctl(sd, cmd, &cmd_data);
 		if (rc < 0)
-			pr_err("%s:%d cam_sensor_subdev_ioctl failed\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "cam_sensor_subdev_ioctl failed");
 		break;
 	default:
-		pr_err("%s:%d Invalid compat ioctl cmd_type: %d\n",
-			__func__, __LINE__, cmd);
+		CAM_ERR(CAM_SENSOR, "Invalid compat ioctl cmd_type: %d", cmd);
 		rc = -EINVAL;
 	}
 
 	if (!rc) {
 		if (copy_to_user((void __user *)arg, &cmd_data,
 			sizeof(cmd_data))) {
-			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+			CAM_ERR(CAM_SENSOR,
+				"Failed to copy to user_ptr=%pK size=%zu",
 				(void __user *)arg, sizeof(cmd_data));
 			rc = -EFAULT;
 		}
@@ -188,7 +185,7 @@
 
 	rc = cam_sensor_parse_dt(s_ctrl);
 	if (rc < 0) {
-		pr_err("failed: cam_sensor_parse_dt rc %d", rc);
+		CAM_ERR(CAM_SENSOR, "failed: cam_sensor_parse_dt rc %d", rc);
 		goto free_s_ctrl;
 	}
 
@@ -211,8 +208,7 @@
 
 	rc = cam_register_subdev(&(s_ctrl->v4l2_dev_str));
 	if (rc < 0) {
-		pr_err("%s:%d :ERROR: Fail with cam_register_subdev\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Fail with cam_register_subdev");
 		goto free_s_ctrl;
 	}
 
@@ -278,11 +274,11 @@
 
 	rc = platform_driver_register(&cam_sensor_platform_driver);
 	if (rc)
-		pr_err("%s platform_driver_register failed rc = %d",
-			__func__, rc);
+		CAM_ERR(CAM_SENSOR, "platform_driver_register failed rc = %d",
+			rc);
 	rc = i2c_add_driver(&cam_sensor_driver_i2c);
 	if (rc)
-		pr_err("%s i2c_add_driver failed rc = %d", __func__, rc);
+		CAM_ERR(CAM_SENSOR, "i2c_add_driver failed rc = %d", rc);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
index ae14c9d..f3c70c4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -31,6 +31,7 @@
 #include <cam_sensor_cmn_header.h>
 #include <cam_subdev.h>
 #include <cam_sensor_io.h>
+#include "cam_debug_util.h"
 
 #define NUM_MASTERS 2
 #define NUM_QUEUES 2
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
index 78edec1..c2f1b4d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
@@ -34,13 +34,12 @@
 
 	src_node = of_parse_phandle(of_node, "actuator-src", 0);
 	if (!src_node) {
-		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+		CAM_DBG(CAM_SENSOR, "src_node NULL");
 	} else {
 		rc = of_property_read_u32(src_node, "cell-index", &val);
-		CDBG("%s:%d actuator cell index %d, rc %d\n", __func__,
-			__LINE__, val, rc);
+		CAM_DBG(CAM_SENSOR, "actuator cell index %d, rc %d", val, rc);
 		if (rc < 0) {
-			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "failed %d", rc);
 			of_node_put(src_node);
 			return rc;
 		}
@@ -50,13 +49,12 @@
 
 	src_node = of_parse_phandle(of_node, "ois-src", 0);
 	if (!src_node) {
-		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+		CAM_DBG(CAM_SENSOR, "src_node NULL");
 	} else {
 		rc = of_property_read_u32(src_node, "cell-index", &val);
-		CDBG("%s:%d ois cell index %d, rc %d\n", __func__, __LINE__,
-			val, rc);
+		CAM_DBG(CAM_SENSOR, "ois cell index %d, rc %d", val, rc);
 		if (rc < 0) {
-			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "failed %d", rc);
 			of_node_put(src_node);
 			return rc;
 		}
@@ -66,13 +64,12 @@
 
 	src_node = of_parse_phandle(of_node, "eeprom-src", 0);
 	if (!src_node) {
-		CDBG("%s:%d eeprom src_node NULL\n", __func__, __LINE__);
+		CAM_DBG(CAM_SENSOR, "eeprom src_node NULL");
 	} else {
 		rc = of_property_read_u32(src_node, "cell-index", &val);
-		CDBG("%s:%d eeprom cell index %d, rc %d\n", __func__, __LINE__,
-			val, rc);
+		CAM_DBG(CAM_SENSOR, "eeprom cell index %d, rc %d", val, rc);
 		if (rc < 0) {
-			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "failed %d", rc);
 			of_node_put(src_node);
 			return rc;
 		}
@@ -82,13 +79,12 @@
 
 	src_node = of_parse_phandle(of_node, "led-flash-src", 0);
 	if (!src_node) {
-		CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+		CAM_DBG(CAM_SENSOR, "src_node NULL");
 	} else {
 		rc = of_property_read_u32(src_node, "cell-index", &val);
-		CDBG("%s:%d led flash cell index %d, rc %d\n", __func__,
-			__LINE__, val, rc);
+		CAM_DBG(CAM_SENSOR, "led flash cell index %d, rc %d", val, rc);
 		if (rc < 0) {
-			pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "failed %d", rc);
 			of_node_put(src_node);
 			return rc;
 		}
@@ -98,8 +94,7 @@
 
 	rc = of_property_read_u32(of_node, "csiphy-sd-index", &val);
 	if (rc < 0)
-		pr_err("%s:%d :Error: paring the dt node for csiphy rc %d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "parsing csiphy dt node failed rc %d", rc);
 	else
 		sensor_info->subdev_id[SUB_MODULE_CSIPHY] = val;
 
@@ -120,15 +115,14 @@
 
 	rc = cam_soc_util_get_dt_properties(soc_info);
 	if (rc < 0) {
-		pr_err("%s:%d Failed to read DT properties rc %d",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "Failed to read DT properties rc %d", rc);
 		goto FREE_SENSOR_DATA;
 	}
 
 	rc =  cam_sensor_util_init_gpio_pin_tbl(soc_info,
 			&sensordata->power_info.gpio_num_info);
 	if (rc < 0) {
-		pr_err("%s:%d Failed to read gpios %d", __func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "Failed to read gpios %d", rc);
 		goto FREE_SENSOR_DATA;
 	}
 
@@ -136,8 +130,7 @@
 
 	/* Validate cell_id */
 	if (s_ctrl->id >= MAX_CAMERAS) {
-		pr_err("%s:%d Failed invalid cell_id %d", __func__, __LINE__,
-			s_ctrl->id);
+		CAM_ERR(CAM_SENSOR, "Failed invalid cell_id %d", s_ctrl->id);
 		rc = -EINVAL;
 		goto FREE_SENSOR_DATA;
 	}
@@ -145,16 +138,15 @@
 	/* Read subdev info */
 	rc = cam_sensor_get_sub_module_index(of_node, sensordata);
 	if (rc < 0) {
-		pr_err("%s:%d failed to get sub module index, rc=%d\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "failed to get sub module index, rc=%d",
+			 rc);
 		goto FREE_SENSOR_DATA;
 	}
 
 	/* Get CCI master */
 	rc = of_property_read_u32(of_node, "cci-master",
 		&s_ctrl->cci_i2c_master);
-	CDBG("%s:%d cci-master %d, rc %d", __func__, __LINE__,
-		s_ctrl->cci_i2c_master, rc);
+	CAM_DBG(CAM_SENSOR, "cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
 	if (rc < 0) {
 		/* Set default master 0 */
 		s_ctrl->cci_i2c_master = MASTER_0;
@@ -163,17 +155,17 @@
 
 	if (of_property_read_u32(of_node, "sensor-position-pitch",
 		&sensordata->pos_pitch) < 0) {
-		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+		CAM_DBG(CAM_SENSOR, "Invalid sensor position");
 		sensordata->pos_pitch = 360;
 	}
 	if (of_property_read_u32(of_node, "sensor-position-roll",
 		&sensordata->pos_roll) < 0) {
-		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+		CAM_DBG(CAM_SENSOR, "Invalid sensor position");
 		sensordata->pos_roll = 360;
 	}
 	if (of_property_read_u32(of_node, "sensor-position-yaw",
 		&sensordata->pos_yaw) < 0) {
-		CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+		CAM_DBG(CAM_SENSOR, "Invalid sensor position");
 		sensordata->pos_yaw = 360;
 	}
 
@@ -188,13 +180,13 @@
 {
 	/* Validate input parameters */
 	if (!s_ctrl) {
-		pr_err("%s:%d failed: invalid params s_ctrl %pK\n", __func__,
-			__LINE__, s_ctrl);
+		CAM_ERR(CAM_SENSOR, "failed: invalid params s_ctrl %pK",
+			s_ctrl);
 		return -EINVAL;
 	}
 
-	CDBG("%s: %d master_type: %d\n", __func__, __LINE__,
-		s_ctrl->io_master_info.master_type);
+	CAM_DBG(CAM_SENSOR,
+		"master_type: %d", s_ctrl->io_master_info.master_type);
 	/* Initialize cci_client */
 	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
 		s_ctrl->io_master_info.cci_client = kzalloc(sizeof(
@@ -203,8 +195,8 @@
 			return -ENOMEM;
 
 	} else {
-		pr_err("%s:%d Invalid master / Master type Not supported\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR,
+			"Invalid master / Master type Not supported");
 		return -EINVAL;
 	}
 
@@ -219,30 +211,29 @@
 	/* Parse dt information and store in sensor control structure */
 	rc = cam_sensor_driver_get_dt_data(s_ctrl);
 	if (rc < 0) {
-		pr_err("%s:%d Failed to get dt data rc %d", __func__, __LINE__,
-			rc);
+		CAM_ERR(CAM_SENSOR, "Failed to get dt data rc %d", rc);
 		return rc;
 	}
 
 	/* Initialize mutex */
 	mutex_init(&(s_ctrl->cam_sensor_mutex));
 
-	CDBG("%s: %d\n", __func__, __LINE__);
+	CAM_DBG(CAM_SENSOR, "Initialize Default Parameters");
 	/* Initialize default parameters */
 	for (i = 0; i < soc_info->num_clk; i++) {
 		soc_info->clk[i] = devm_clk_get(&soc_info->pdev->dev,
 					soc_info->clk_name[i]);
 		if (!soc_info->clk[i]) {
-			pr_err("%s:%d get failed for %s\n",
-				__func__, __LINE__, soc_info->clk_name[i]);
+			CAM_ERR(CAM_SENSOR, "get failed for %s",
+				 soc_info->clk_name[i]);
 			rc = -ENOENT;
 			return rc;
 		}
 	}
 	rc = msm_sensor_init_default_params(s_ctrl);
 	if (rc < 0) {
-		pr_err("%s;%d failed: msm_sensor_init_default_params rc %d",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR,
+			"failed: msm_sensor_init_default_params rc %d", rc);
 		goto FREE_DT_DATA;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
index 4c3b8e8..6a0a0e1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
@@ -6,4 +6,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o cam_sensor_spi.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
index 40a69ef..915e2f7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -14,9 +14,6 @@
 #include "cam_sensor_i2c.h"
 #include "cam_cci_dev.h"
 
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
 int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client,
 	uint32_t addr, uint32_t *data,
 	enum camera_sensor_i2c_type addr_type,
@@ -41,9 +38,10 @@
 	rc = v4l2_subdev_call(cci_client->cci_subdev,
 		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
 	if (rc < 0) {
-		pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "rc = %d", rc);
 		return rc;
 	}
+
 	rc = cci_ctrl.status;
 	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
 		*data = buf[0];
@@ -58,6 +56,46 @@
 	return rc;
 }
 
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte)
+{
+	int32_t                    rc = -EFAULT;
+	unsigned char             *buf = NULL;
+	int                        i = 0;
+	struct cam_cci_ctrl        cci_ctrl;
+
+	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (num_byte > I2C_REG_DATA_MAX)) {
+		CAM_ERR(CAM_SENSOR, "addr_type %d num_byte %d", addr_type,
+			num_byte);
+		return rc;
+	}
+
+	buf = kzalloc(num_byte, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	cci_ctrl.cmd = MSM_CCI_I2C_READ;
+	cci_ctrl.cci_info = cci_client;
+	cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+	cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+	cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+	cci_ctrl.status = -EFAULT;
+	rc = v4l2_subdev_call(cci_client->cci_subdev,
+		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	rc = cci_ctrl.status;
+	CAM_DBG(CAM_SENSOR, "addr = 0x%x, rc = %d", addr, rc);
+	for (i = 0; i < num_byte; i++) {
+		data[i] = buf[i];
+		CAM_DBG(CAM_SENSOR, "Byte %d: Data: 0x%x", i, data[i]);
+	}
+	kfree(buf);
+	return rc;
+}
+
 static int32_t cam_cci_i2c_write_table_cmd(
 	struct camera_io_master *client,
 	struct cam_sensor_i2c_reg_setting *write_setting,
@@ -85,7 +123,7 @@
 	rc = v4l2_subdev_call(client->cci_client->cci_subdev,
 		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
 	if (rc < 0) {
-		pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
 		return rc;
 	}
 	rc = cci_ctrl.status;
@@ -135,12 +173,12 @@
 	int32_t rc = -EINVAL;
 	int32_t i = 0;
 
-	CDBG("%s: addr: 0x%x data: 0x%x dt: %d\n",
-		__func__, addr, data, data_type);
+	CAM_DBG(CAM_SENSOR, "addr: 0x%x data: 0x%x dt: %d",
+		addr, data, data_type);
 
 	if (delay_ms > MAX_POLL_DELAY_MS) {
-		pr_err("%s:%d invalid delay = %d max_delay = %d\n",
-			__func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+		CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+			delay_ms, MAX_POLL_DELAY_MS);
 		return -EINVAL;
 	}
 	for (i = 0; i < delay_ms; i++) {
@@ -154,11 +192,10 @@
 
 	/* If rc is 1 then read is successful but poll is failure */
 	if (rc == 1)
-		pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
 
 	if (rc < 0)
-		pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
 
 	return rc;
 }
@@ -169,13 +206,13 @@
 	int32_t rc = 0;
 	struct cam_cci_ctrl cci_ctrl;
 
-	CDBG("%s line %d\n", __func__, __LINE__);
+	CAM_DBG(CAM_SENSOR, "cci_cmd: %d", cci_cmd);
 	cci_ctrl.cmd = cci_cmd;
 	cci_ctrl.cci_info = cci_client;
 	rc = v4l2_subdev_call(cci_client->cci_subdev,
 		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
 	if (rc < 0) {
-		pr_err("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
 		return rc;
 	}
 	return cci_ctrl.status;
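A minimal caller sketch for the new sequential read helper above (hypothetical function, buffer, address and length; these are illustrative values, not ones used by this patch):

/* Hypothetical caller: burst-read 32 bytes over CCI starting at 0x0A04. */
static int read_otp_sketch(struct cam_sensor_cci_client *cci_client)
{
	uint8_t otp[32];
	int32_t rc;

	rc = cam_camera_cci_i2c_read_seq(cci_client, 0x0A04, otp,
		CAMERA_SENSOR_I2C_TYPE_WORD, sizeof(otp));
	if (rc < 0)
		CAM_ERR(CAM_SENSOR, "seq read failed rc %d", rc);
	return rc;
}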
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 06e8104..6207a8a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -26,6 +26,8 @@
 #define I2C_COMPARE_MATCH 0
 #define I2C_COMPARE_MISMATCH 1
 
+#define I2C_REG_DATA_MAX       (8*1024)
+
 /**
  * @client: CCI client structure
  * @data: I2C data
@@ -41,6 +43,20 @@
 
 /**
  * @client: CCI client structure
+ * @addr: I2c address
+ * @data: I2C data
+ * @addr_type: I2c address type
+ * @num_byte: number of bytes
+ *
+ * This API handles CCI sequential read
+ */
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte);
+
+/**
+ * @client: CCI client structure
  * @cci_cmd: CCI command type
  *
  * This API handles CCI random write
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 3e1b331..9e38e1a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -10,9 +10,14 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "CAM-SENSOR_IO %s:%d " fmt, __func__, __LINE__
+
 #include "cam_sensor_io.h"
 #include "cam_sensor_i2c.h"
 
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
 int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
 	uint32_t addr, uint16_t data, uint32_t data_mask,
 	enum camera_sensor_i2c_type data_type,
@@ -22,7 +27,7 @@
 	int16_t mask = data_mask & 0xFF;
 
 	if (!io_master_info) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -34,8 +39,8 @@
 			addr, data, data_mask, addr_type, data_type,
 			delay_ms);
 	} else {
-		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
-			__LINE__, io_master_info->master_type);
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
 		return -EINVAL;
 	}
 }
@@ -46,7 +51,7 @@
 	enum camera_sensor_i2c_type data_type)
 {
 	if (!io_master_info) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -56,18 +61,44 @@
 	} else if (io_master_info->master_type == I2C_MASTER) {
 		return cam_qup_i2c_read(io_master_info->client,
 			addr, data, addr_type, data_type);
+	} else if (io_master_info->master_type == SPI_MASTER) {
+		return cam_spi_read(io_master_info,
+			addr, data, addr_type);
 	} else {
-		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
-			__LINE__, io_master_info->master_type);
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
 		return -EINVAL;
 	}
+	return 0;
+}
+
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+{
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_camera_cci_i2c_read_seq(io_master_info->cci_client,
+			addr, data, addr_type, num_bytes);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_read_seq(io_master_info->client,
+			addr, data, addr_type, num_bytes);
+	} else if (io_master_info->master_type == SPI_MASTER) {
+		return cam_spi_read(io_master_info,
+			addr, (uint32_t *)data, addr_type);
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
+		return -EINVAL;
+	}
+	return 0;
 }
 
 int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
 	struct cam_sensor_i2c_reg_setting *write_setting)
 {
 	if (!write_setting || !io_master_info) {
-		pr_err("Input parameters not valid ws: %pK ioinfo: %pK",
+		CAM_ERR(CAM_SENSOR,
+			"Input parameters not valid ws: %pK ioinfo: %pK",
 			write_setting, io_master_info);
 		return -EINVAL;
 	}
@@ -79,8 +110,8 @@
 		return cam_qup_i2c_write_table(io_master_info,
 			write_setting);
 	} else {
-		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
-			__LINE__, io_master_info->master_type);
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
 		return -EINVAL;
 	}
 }
@@ -88,7 +119,7 @@
 int32_t camera_io_init(struct camera_io_master *io_master_info)
 {
 	if (!io_master_info) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -98,8 +129,8 @@
 		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
 			MSM_CCI_INIT);
 	} else {
-		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
-			__LINE__, io_master_info->master_type);
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
 		return -EINVAL;
 	}
 }
@@ -107,7 +138,7 @@
 int32_t camera_io_release(struct camera_io_master *io_master_info)
 {
 	if (!io_master_info) {
-		pr_err("%s:%d Invalid Args\n", __func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
 		return -EINVAL;
 	}
 
@@ -115,8 +146,8 @@
 		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
 			MSM_CCI_RELEASE);
 	} else {
-		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
-			__LINE__, io_master_info->master_type);
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
 		return -EINVAL;
 	}
 }
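The io_master wrappers above keep callers transport-agnostic; a minimal sketch (hypothetical caller, register address and length are illustrative) of how the same call is routed to CCI, QUP I2C or the new SPI path based on master_type:

/* Hypothetical caller: the dispatch on io_master_info->master_type is
 * handled inside camera_io_dev_read_seq(), so this code does not change
 * when the module moves between CCI/I2C and SPI.
 */
static int read_id_block_sketch(struct camera_io_master *io_master_info)
{
	uint8_t buf[16];
	int32_t rc;

	rc = camera_io_dev_read_seq(io_master_info, 0x1000, buf,
		CAMERA_SENSOR_I2C_TYPE_WORD, sizeof(buf));
	if (rc < 0)
		CAM_ERR(CAM_SENSOR, "read_seq failed rc %d", rc);
	return rc;
}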
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index f721afd..9a60fd0e 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -52,16 +52,15 @@
  * @io_master_info: I2C/SPI master information
  * @addr: I2C address
  * @data: I2C data
- * @addr_type: I2C addr type
+ * @addr_type: I2C address type
  * @num_bytes: number of bytes
  *
- * This API abstracts sequential read functionality based on master type
+ * This API abstracts sequential read functionality based on master type
  */
 int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
 	uint32_t addr, uint8_t *data,
 	enum camera_sensor_i2c_type addr_type,
-	uint32_t num_bytes);
-
+	int32_t num_bytes);
 
 /**
  * @io_master_info: I2C/SPI master information
@@ -103,4 +102,6 @@
 	enum camera_sensor_i2c_type addr_type,
 	uint32_t delay_ms);
 
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_spi.h"
 #endif /* _CAM_SENSOR_IO_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
index b25b1855..b64e0d0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -40,7 +40,7 @@
 	};
 	rc = i2c_transfer(dev_client->adapter, msgs, 2);
 	if (rc < 0)
-		pr_err("%s:failed 0x%x\n", __func__, saddr);
+		CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
 	return rc;
 }
 
@@ -61,7 +61,7 @@
 	};
 	rc = i2c_transfer(dev_client->client->adapter, msg, 1);
 	if (rc < 0)
-		pr_err("%s: failed 0x%x\n", __func__, saddr);
+		CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
 	return rc;
 }
 
@@ -77,8 +77,7 @@
 		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
 		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
 		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
-		pr_err("ERR: %s Failed with addr/data_type verfication\n",
-			__func__);
+		CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verification");
 		return rc;
 	}
 
@@ -105,7 +104,7 @@
 
 	rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type);
 	if (rc < 0) {
-		pr_err("%s fail\n", __func__);
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
 		goto read_fail;
 	}
 
@@ -119,7 +118,7 @@
 		*data = buf[0] << 24 | buf[1] << 16 |
 			buf[2] << 8 | buf[3];
 
-	CDBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+	CAM_DBG(CAM_SENSOR, "addr = 0x%x data: 0x%x", addr, *data);
 read_fail:
 	kfree(buf);
 	buf = NULL;
@@ -137,14 +136,13 @@
 
 	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
 		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
-		pr_err("ERR: %s Failed with addr_type verification\n",
-			__func__);
+		CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
 		return rc;
 	}
 
 	if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) {
-		pr_err("%s: Error num_byte:0x%x max supported:0x%x\n",
-			__func__, num_byte, I2C_REG_DATA_MAX);
+		CAM_ERR(CAM_SENSOR, "num_byte:0x%x max supported:0x%x",
+			num_byte, I2C_REG_DATA_MAX);
 		return rc;
 	}
 
@@ -170,7 +168,7 @@
 
 	rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte);
 	if (rc < 0) {
-		pr_err("%s fail\n", __func__);
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
 		goto read_seq_fail;
 	}
 
@@ -213,8 +211,8 @@
 	int i = 0;
 
 	if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) {
-		pr_err("%s:%d invalid delay = %d max_delay = %d\n",
-			__func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+		CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+			delay_ms, MAX_POLL_DELAY_MS);
 		return -EINVAL;
 	}
 
@@ -234,10 +232,9 @@
 	}
 	/* If rc is MISMATCH then read is successful but poll is failure */
 	if (rc == I2C_COMPARE_MISMATCH)
-		pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
-			__func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
 	if (rc < 0)
-		pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
 
 	return rc;
 }
@@ -251,20 +248,17 @@
 	unsigned char buf[I2C_REG_MAX_BUF_SIZE];
 	uint8_t len = 0;
 
-	CDBG("%s reg addr = 0x%x data type: %d\n",
-			  __func__, reg_setting->reg_addr, data_type);
+	CAM_DBG(CAM_SENSOR, "reg addr = 0x%x data type: %d",
+			reg_setting->reg_addr, data_type);
 	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
 		buf[0] = reg_setting->reg_addr;
-		CDBG("%s byte %d: 0x%x\n", __func__,
-			len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
 		len = 1;
 	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
 		buf[0] = reg_setting->reg_addr >> 8;
 		buf[1] = reg_setting->reg_addr;
-		CDBG("%s byte %d: 0x%x\n", __func__,
-			len, buf[len]);
-		CDBG("%s byte %d: 0x%x\n", __func__,
-			len+1, buf[len+1]);
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len+1, buf[len+1]);
 		len = 2;
 	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
 		buf[0] = reg_setting->reg_addr >> 16;
@@ -278,47 +272,47 @@
 		buf[3] = reg_setting->reg_addr;
 		len = 4;
 	} else {
-		pr_err("%s: Invalid I2C addr type\n", __func__);
+		CAM_ERR(CAM_SENSOR, "Invalid I2C addr type");
 		return -EINVAL;
 	}
 
-	CDBG("Data: 0x%x\n", reg_setting->reg_data);
+	CAM_DBG(CAM_SENSOR, "Data: 0x%x", reg_setting->reg_data);
 	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
 		buf[len] = reg_setting->reg_data;
-		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
 		len += 1;
 	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
 		buf[len] = reg_setting->reg_data >> 8;
 		buf[len+1] = reg_setting->reg_data;
-		CDBG("Byte %d: 0x%x\n", len, buf[len]);
-		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
 		len += 2;
 	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
 		buf[len] = reg_setting->reg_data >> 16;
 		buf[len + 1] = reg_setting->reg_data >> 8;
 		buf[len + 2] = reg_setting->reg_data;
-		CDBG("Byte %d: 0x%x\n", len, buf[len]);
-		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
-		CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
 		len += 3;
 	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
 		buf[len] = reg_setting->reg_data >> 24;
 		buf[len + 1] = reg_setting->reg_data >> 16;
 		buf[len + 2] = reg_setting->reg_data >> 8;
 		buf[len + 3] = reg_setting->reg_data;
-		CDBG("Byte %d: 0x%x\n", len, buf[len]);
-		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
-		CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
-		CDBG("Byte %d: 0x%x\n", len+3, buf[len+3]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+3, buf[len+3]);
 		len += 4;
 	} else {
-		pr_err("%s: Invalid Data Type\n", __func__);
+		CAM_ERR(CAM_SENSOR, "Invalid Data Type");
 		return -EINVAL;
 	}
 
 	rc = cam_qup_i2c_txdata(client, buf, len);
 	if (rc < 0)
-		pr_err("%s fail\n", __func__);
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
 	return rc;
 }
 
@@ -341,7 +335,7 @@
 	reg_setting = write_setting->reg_setting;
 
 	for (i = 0; i < write_setting->size; i++) {
-		CDBG("%s addr 0x%x data 0x%x\n", __func__,
+		CAM_DBG(CAM_SENSOR, "addr 0x%x data 0x%x",
 			reg_setting->reg_addr, reg_setting->reg_data);
 
 		rc = cam_qup_i2c_write(client, reg_setting,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
new file mode 100644
index 0000000..e0b737e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
@@ -0,0 +1,469 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_spi.h"
+#include "cam_debug_util.h"
+
+static int cam_spi_txfr(struct spi_device *spi, char *txbuf,
+	char *rxbuf, int num_byte)
+{
+	struct spi_transfer txfr;
+	struct spi_message msg;
+
+	memset(&txfr, 0, sizeof(txfr));
+	txfr.tx_buf = txbuf;
+	txfr.rx_buf = rxbuf;
+	txfr.len = num_byte;
+	spi_message_init(&msg);
+	spi_message_add_tail(&txfr, &msg);
+
+	return spi_sync(spi, &msg);
+}
+
+static int cam_spi_txfr_read(struct spi_device *spi, char *txbuf,
+	char *rxbuf, int txlen, int rxlen)
+{
+	struct spi_transfer tx;
+	struct spi_transfer rx;
+	struct spi_message m;
+
+	memset(&tx, 0, sizeof(tx));
+	memset(&rx, 0, sizeof(rx));
+	tx.tx_buf = txbuf;
+	rx.rx_buf = rxbuf;
+	tx.len = txlen;
+	rx.len = rxlen;
+	spi_message_init(&m);
+	spi_message_add_tail(&tx, &m);
+	spi_message_add_tail(&rx, &m);
+	return spi_sync(spi, &m);
+}
+
+/**
+ * cam_set_addr() - helper function to set transfer address
+ * @addr:	device address
+ * @addr_len:	the addr field length of an instruction
+ * @type:	type (i.e. byte-length) of @addr
+ * @str:	shifted address output, must be zeroed when passed in
+ *
+ * This helper function sets @str based on the addr field length of an
+ * instruction and the data length.
+ */
+static void cam_set_addr(uint32_t addr, uint8_t addr_len,
+	enum camera_sensor_i2c_type type,
+	char *str)
+{
+	int i, len;
+
+	if (!addr_len)
+		return;
+
+	if (addr_len < type)
+		CAM_DBG(CAM_EEPROM, "omitting higher bits in address");
+
+	/* only support transfer MSB first for now */
+	len = addr_len - type;
+	for (i = len; i < addr_len; i++) {
+		if (i >= 0)
+			str[i] = (addr >> (BITS_PER_BYTE * (addr_len - i - 1)))
+				& 0xFF;
+	}
+
+}
+
+/**
+ * cam_spi_tx_helper() - wrapper for SPI transaction
+ * @client:     io client
+ * @inst:       inst of this transaction
+ * @addr:       device addr following the inst
+ * @data:       output byte array (could be NULL)
+ * @num_byte:   size of @data
+ * @tx, rx:     optional transfer buffer.  It must be at least header
+ *              + @num_byte long.
+ *
+ * This is the core function for SPI transaction, except for writes.  It first
+ * checks address type, then allocates required memory for tx/rx buffers.
+ * It sends out <opcode><addr>, and optionally receives @num_byte of response,
+ * if @data is not NULL.  This function does not check for wait conditions,
+ * and will return immediately once bus transaction finishes.
+ *
+ * This function will allocate buffers of header + @num_byte long.  For
+ * large transfers, the allocation could fail.  External buffer @tx, @rx
+ * should be passed in to bypass allocation.  The size of buffer should be
+ * at least header + num_byte long.  Since buffer is managed externally,
+ * @data will be ignored, and read results will be in @rx.
+ * @tx, @rx also can be used for repeated transfers to improve performance.
+ */
+static int32_t cam_spi_tx_helper(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	uint32_t num_byte, char *tx, char *rx)
+{
+	int32_t rc = -EINVAL;
+	struct spi_device *spi = client->spi_client->spi_master;
+	char *ctx = NULL, *crx = NULL;
+	uint32_t len, hlen;
+	uint8_t retries = client->spi_client->retries;
+	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+	if (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		return rc;
+
+	hlen = cam_camera_spi_get_hlen(inst);
+	len = hlen + num_byte;
+
+	if (tx) {
+		ctx = tx;
+	} else {
+		ctx = kzalloc(len, GFP_KERNEL | GFP_DMA);
+		if (!ctx)
+			return -ENOMEM;
+	}
+
+	if (num_byte) {
+		if (rx) {
+			crx = rx;
+		} else {
+			crx = kzalloc(len, GFP_KERNEL | GFP_DMA);
+			if (!crx) {
+				if (!tx)
+					kfree(ctx);
+				return -ENOMEM;
+			}
+		}
+	} else {
+		crx = NULL;
+	}
+
+	ctx[0] = inst->opcode;
+	cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+	while ((rc = cam_spi_txfr(spi, ctx, crx, len)) && retries) {
+		retries--;
+		msleep(client->spi_client->retry_delay);
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed: spi txfr rc %d", rc);
+		goto out;
+	}
+	if (data && num_byte && !rx)
+		memcpy(data, crx + hlen, num_byte);
+
+out:
+	if (!tx)
+		kfree(ctx);
+	if (!rx)
+		kfree(crx);
+	return rc;
+}
+
+static int32_t cam_spi_tx_read(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	uint32_t num_byte, char *tx, char *rx)
+{
+	int32_t rc = -EINVAL;
+	struct spi_device *spi = client->spi_client->spi_master;
+	char *ctx = NULL, *crx = NULL;
+	uint32_t hlen;
+	uint8_t retries = client->spi_client->retries;
+	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+	if ((addr_type != CAMERA_SENSOR_I2C_TYPE_WORD)
+		&& (addr_type != CAMERA_SENSOR_I2C_TYPE_BYTE)
+		&& (addr_type != CAMERA_SENSOR_I2C_TYPE_3B))
+		return rc;
+
+	hlen = cam_camera_spi_get_hlen(inst);
+	if (tx) {
+		ctx = tx;
+	} else {
+		ctx = kzalloc(hlen, GFP_KERNEL | GFP_DMA);
+		if (!ctx)
+			return -ENOMEM;
+	}
+	if (num_byte) {
+		if (rx) {
+			crx = rx;
+		} else {
+			crx = kzalloc(num_byte, GFP_KERNEL | GFP_DMA);
+			if (!crx) {
+				if (!tx)
+					kfree(ctx);
+				return -ENOMEM;
+			}
+		}
+	} else {
+		crx = NULL;
+	}
+
+	ctx[0] = inst->opcode;
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		cam_set_addr(addr, inst->addr_len, addr_type,
+			ctx + 1);
+	} else {
+		ctx[1] = (addr >> BITS_PER_BYTE) & 0xFF;
+		ctx[2] = (addr & 0xFF);
+		ctx[3] = 0;
+	}
+	CAM_DBG(CAM_EEPROM, "tx(%u): %02x %02x %02x %02x", hlen, ctx[0],
+		ctx[1], ctx[2], ctx[3]);
+	while ((rc = cam_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
+			&& retries) {
+		retries--;
+		msleep(client->spi_client->retry_delay);
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed %d", rc);
+		goto out;
+	}
+	if (data && num_byte && !rx)
+		memcpy(data, crx, num_byte);
+out:
+	if (!tx)
+		kfree(ctx);
+	if (!rx)
+		kfree(crx);
+	return rc;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type data_type)
+{
+	int rc = -EINVAL;
+	uint8_t temp[CAMERA_SENSOR_I2C_TYPE_MAX];
+
+	if ((data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return rc;
+
+	rc = cam_spi_tx_read(client,
+		&client->spi_client->cmd_tbl.read, addr, &temp[0],
+		data_type, NULL, NULL);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed %d", rc);
+		return rc;
+	}
+
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = temp[0];
+	else
+		*data = (temp[0] << BITS_PER_BYTE) | temp[1];
+
+	CAM_DBG(CAM_SENSOR, "addr 0x%x, data %u", addr, *data);
+	return rc;
+}
+
+int cam_spi_query_id(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+	return cam_spi_tx_helper(client,
+		&client->spi_client->cmd_tbl.query_id, addr, data, num_byte,
+		NULL, NULL);
+}
+
+static int32_t cam_spi_read_status_reg(
+	struct camera_io_master *client, uint8_t *status)
+{
+	struct cam_camera_spi_inst *rs =
+		&client->spi_client->cmd_tbl.read_status;
+
+	if (rs->addr_len != 0) {
+		CAM_ERR(CAM_SENSOR, "not implemented yet");
+		return -EINVAL;
+	}
+	return cam_spi_tx_helper(client, rs, 0, status, 1, NULL, NULL);
+}
+
+static int32_t cam_spi_device_busy(struct camera_io_master *client,
+	uint8_t *busy)
+{
+	int rc;
+	uint8_t st = 0;
+
+	rc = cam_spi_read_status_reg(client,  &st);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed to read status reg");
+		return rc;
+	}
+	*busy = st & client->spi_client->busy_mask;
+	return 0;
+}
+
+static int32_t cam_spi_wait(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst)
+{
+	uint8_t busy;
+	int i, rc;
+
+	CAM_DBG(CAM_SENSOR, "op 0x%x wait start", inst->opcode);
+	for (i = 0; i < inst->delay_count; i++) {
+		rc = cam_spi_device_busy(client, &busy);
+		if (rc < 0)
+			return rc;
+		if (!busy)
+			break;
+		msleep(inst->delay_intv);
+		CAM_DBG(CAM_SENSOR, "op 0x%x wait", inst->opcode);
+	}
+	if (i > inst->delay_count) {
+		CAM_ERR(CAM_SENSOR, "op %x timed out", inst->opcode);
+		return -ETIMEDOUT;
+	}
+	CAM_DBG(CAM_SENSOR, "op %x finished", inst->opcode);
+	return 0;
+}
+
+static int32_t cam_spi_write_enable(
+	struct camera_io_master *client)
+{
+	struct cam_camera_spi_inst *we =
+		&client->spi_client->cmd_tbl.write_enable;
+	int rc;
+
+	if (we->opcode == 0)
+		return 0;
+	if (we->addr_len != 0) {
+		CAM_ERR(CAM_SENSOR, "not implemented yet");
+		return -EINVAL;
+	}
+	rc = cam_spi_tx_helper(client, we, 0, NULL, 0, NULL, NULL);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "write enable failed");
+	return rc;
+}
+
+/**
+ * cam_spi_page_program() - core function to perform write
+ * @client: need for obtaining SPI device
+ * @addr: address to program on device
+ * @data: data to write
+ * @len: size of data
+ * @tx: tx buffer, size >= header + len
+ *
+ * This function performs SPI write, and has no boundary check.  Writing range
+ * should not cross page boundary, or data will be corrupted.  Transaction is
+ * guaranteed to be finished when it returns.  This function should never be
+ * used outside cam_spi_write_seq().
+ */
+static int32_t cam_spi_page_program(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data, uint16_t len, uint8_t *tx)
+{
+	int rc;
+	struct cam_camera_spi_inst *pg =
+		&client->spi_client->cmd_tbl.page_program;
+	struct spi_device *spi = client->spi_client->spi_master;
+	uint8_t retries = client->spi_client->retries;
+	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+	CAM_DBG(CAM_SENSOR, "addr 0x%x, size 0x%x", addr, len);
+	rc = cam_spi_write_enable(client);
+	if (rc < 0)
+		return rc;
+	memset(tx, 0, header_len);
+	tx[0] = pg->opcode;
+	cam_set_addr(addr, pg->addr_len, addr_type, tx + 1);
+	memcpy(tx + header_len, data, len);
+	CAM_DBG(CAM_SENSOR, "tx(%u): %02x %02x %02x %02x",
+		len, tx[0], tx[1], tx[2], tx[3]);
+	while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
+		rc = cam_spi_wait(client, pg);
+		msleep(client->spi_client->retry_delay);
+		retries--;
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed %d", rc);
+		return rc;
+	}
+	rc = cam_spi_wait(client, pg);
+	return rc;
+}
+
+int cam_spi_write(struct camera_io_master *client,
+	uint32_t addr, uint16_t data,
+	enum camera_sensor_i2c_type data_type)
+{
+	struct cam_camera_spi_inst *pg =
+		&client->spi_client->cmd_tbl.page_program;
+	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+	uint16_t len = 0;
+	char buf[2];
+	char *tx;
+	int rc = -EINVAL;
+	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
+
+	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
+		&& data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+		return rc;
+	CAM_DBG(CAM_EEPROM, "Data: 0x%x", data);
+	len = header_len + (uint8_t)data_type;
+	tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+	if (!tx)
+		goto NOMEM;
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = data;
+		CAM_DBG(CAM_EEPROM, "Byte %d: 0x%x", len, buf[0]);
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
+		buf[1] = (data & 0x00FF);
+	}
+	rc = cam_spi_page_program(client, addr, buf,
+		(uint16_t)data_type, tx);
+	if (rc < 0)
+		goto ERROR;
+	goto OUT;
+NOMEM:
+	CAM_ERR(CAM_EEPROM, "memory allocation failed");
+	return -ENOMEM;
+ERROR:
+	CAM_ERR(CAM_EEPROM, "write failed");
+OUT:
+	kfree(tx);
+	return rc;
+}
+
+int cam_spi_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int rc = -EFAULT;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+	uint16_t client_addr_type;
+	enum camera_sensor_i2c_type addr_type;
+
+	if (!client || !write_setting)
+		return rc;
+	if (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
+		&& write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+		return rc;
+	reg_setting = write_setting->reg_setting;
+	addr_type = write_setting->addr_type;
+	client_addr_type = addr_type;
+	for (i = 0; i < write_setting->size; i++) {
+		CAM_DBG(CAM_SENSOR, "addr %x data %x",
+			reg_setting->reg_addr, reg_setting->reg_data);
+		rc = cam_spi_write(client, reg_setting->reg_addr,
+			reg_setting->reg_data, write_setting->data_type);
+		if (rc < 0)
+			break;
+		reg_setting++;
+	}
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000,
+			(write_setting->delay * 1000) + 1000);
+	addr_type = client_addr_type;
+	return rc;
+}
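A minimal bring-up sketch for the new SPI path (not part of the patch; the 0x03 read opcode, 3-byte address length and retry values are assumptions typical of NOR-style parts, not values mandated here):

/* Hypothetical probe-time setup followed by a single word read.
 * io_master_info->spi_client is assumed to already point at a valid
 * cam_sensor_spi_client with spi_master set.
 */
static int spi_read_sketch(struct camera_io_master *io_master_info)
{
	struct cam_sensor_spi_client *spi_client = io_master_info->spi_client;
	uint32_t val = 0;
	int rc;

	spi_client->cmd_tbl.read.opcode = 0x03;	/* illustrative opcode */
	spi_client->cmd_tbl.read.addr_len = 3;
	spi_client->cmd_tbl.read.dummy_len = 0;
	spi_client->retries = 3;
	spi_client->retry_delay = 1;

	rc = cam_spi_read(io_master_info, 0x0000, &val,
		CAMERA_SENSOR_I2C_TYPE_WORD);
	if (rc < 0)
		CAM_ERR(CAM_SENSOR, "spi read failed rc %d", rc);
	return rc;
}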
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
new file mode 100644
index 0000000..a497491
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SENSOR_SPI_H_
+#define _CAM_SENSOR_SPI_H_
+
+#include <linux/spi/spi.h>
+#include <media/cam_sensor.h>
+#include "cam_sensor_i2c.h"
+
+#define MAX_SPI_SIZE 110
+#define SPI_DYNAMIC_ALLOC
+
+struct cam_camera_spi_inst {
+	uint8_t opcode;
+	uint8_t addr_len;
+	uint8_t dummy_len;
+	uint8_t delay_intv;
+	uint8_t delay_count;
+};
+
+struct cam_spi_write_burst_data {
+	u8 data_msb;
+	u8 data_lsb;
+};
+
+struct cam_spi_write_burst_packet {
+	u8 cmd;
+	u8 addr_msb;
+	u8 addr_lsb;
+	struct cam_spi_write_burst_data data_arr[MAX_SPI_SIZE];
+};
+
+struct cam_camera_burst_info {
+	uint32_t burst_addr;
+	uint32_t burst_start;
+	uint32_t burst_len;
+	uint32_t chunk_size;
+};
+
+struct cam_camera_spi_inst_tbl {
+	struct cam_camera_spi_inst read;
+	struct cam_camera_spi_inst read_seq;
+	struct cam_camera_spi_inst query_id;
+	struct cam_camera_spi_inst page_program;
+	struct cam_camera_spi_inst write_enable;
+	struct cam_camera_spi_inst read_status;
+	struct cam_camera_spi_inst erase;
+};
+
+struct cam_sensor_spi_client {
+	struct spi_device *spi_master;
+	struct cam_camera_spi_inst_tbl cmd_tbl;
+	uint8_t device_id0;
+	uint8_t device_id1;
+	uint8_t mfr_id0;
+	uint8_t mfr_id1;
+	uint8_t retry_delay;
+	uint8_t retries;
+	uint8_t busy_mask;
+	uint16_t page_size;
+	uint32_t erase_size;
+};
+static __always_inline
+uint16_t cam_camera_spi_get_hlen(struct cam_camera_spi_inst *inst)
+{
+	return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type data_type);
+
+int cam_spi_query_id(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int cam_spi_write(struct camera_io_master *client,
+	uint32_t addr, uint16_t data,
+	enum camera_sensor_i2c_type data_type);
+
+int cam_spi_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+
+#endif
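For reference, a short sketch of how the instruction table feeds the header-length helper above (illustrative field values; a 1-byte opcode plus a 3-byte address and no dummy bytes gives a 4-byte header, which is what cam_spi_tx_helper() adds to num_byte when sizing its buffers):

/* Illustrative only: header length for a hypothetical read instruction. */
static uint16_t hlen_sketch(void)
{
	struct cam_camera_spi_inst read_inst = {
		.opcode    = 0x03,	/* placeholder opcode */
		.addr_len  = 3,
		.dummy_len = 0,
	};

	/* 1 (opcode) + 3 (address) + 0 (dummy) == 4 */
	return cam_camera_spi_get_hlen(&read_inst);
}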
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
index d12ff2b..6520042b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -31,6 +31,8 @@
 #define CAM_SENSOR_NAME    "cam-sensor"
 #define CAM_ACTUATOR_NAME  "cam-actuator"
 #define CAM_CSIPHY_NAME    "cam-csiphy"
+#define CAM_FLASH_NAME     "cam-flash"
+#define CAM_EEPROM_NAME    "cam-eeprom"
 
 #define MAX_SYSTEM_PIPELINE_DELAY 2
 
@@ -47,6 +49,11 @@
 	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR,
 	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD,
 	CAMERA_SENSOR_CMD_TYPE_WAIT,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_INIT,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_RER,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET,
 	CAMERA_SENSOR_CMD_TYPE_MAX,
 };
 
@@ -69,6 +76,14 @@
 	CAMERA_SENSOR_WAIT_OP_MAX,
 };
 
+enum camera_flash_opcode {
+	CAMERA_SENSOR_FLASH_OP_INVALID,
+	CAMERA_SENSOR_FLASH_OP_OFF,
+	CAMERA_SENSOR_FLASH_OP_FIRELOW,
+	CAMERA_SENSOR_FLASH_OP_FIREHIGH,
+	CAMERA_SENSOR_FLASH_OP_MAX,
+};
+
 enum camera_sensor_i2c_type {
 	CAMERA_SENSOR_I2C_TYPE_INVALID,
 	CAMERA_SENSOR_I2C_TYPE_BYTE,
@@ -146,6 +161,10 @@
 	CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS
 };
 
+enum cam_eeprom_packet_opcodes {
+	CAM_EEPROM_PACKET_OPCODE_INIT
+};
+
 enum msm_bus_perf_setting {
 	S_INIT,
 	S_PREVIEW,
@@ -166,6 +185,12 @@
 	MSM_CAMERA_SPI_DEVICE,
 };
 
+enum cam_flash_device_type {
+	CAMERA_FLASH_DEVICE_TYPE_PMIC = 0,
+	CAMERA_FLASH_DEVICE_TYPE_I2C,
+	CAMERA_FLASH_DEVICE_TYPE_GPIO,
+};
+
 enum cci_i2c_master_t {
 	MASTER_0,
 	MASTER_1,
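The new flash opcodes are only declared here; a hypothetical consumer (the flash driver itself is not part of these hunks) would dispatch on them roughly as sketched below:

/* Hypothetical dispatch on the new opcodes; flash_op and the handler
 * bodies are placeholders, not code introduced by this patch.
 */
static int flash_op_sketch(enum camera_flash_opcode flash_op)
{
	switch (flash_op) {
	case CAMERA_SENSOR_FLASH_OP_FIRELOW:
		/* torch / pre-flash intensity */
		return 0;
	case CAMERA_SENSOR_FLASH_OP_FIREHIGH:
		/* main flash intensity */
		return 0;
	case CAMERA_SENSOR_FLASH_OP_OFF:
		return 0;
	default:
		return -EINVAL;
	}
}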
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 9f16e93..06590e4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -12,6 +12,7 @@
 
 #include <linux/kernel.h>
 #include "cam_sensor_util.h"
+#include <cam_mem_mgr.h>
 
 #define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
 #define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
@@ -19,9 +20,6 @@
 #define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \
 	(config_val >= min) && (config_val <= max))
 
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-
 static struct i2c_settings_list*
 	cam_sensor_get_i2c_ptr(struct i2c_settings_array *i2c_reg_settings,
 		uint32_t size)
@@ -56,8 +54,7 @@
 	int32_t rc = 0;
 
 	if (i2c_array == NULL) {
-		pr_err("%s:%d ::FATAL:: Invalid argument\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "FATAL:: Invalid argument");
 		return -EINVAL;
 	}
 
@@ -86,8 +83,7 @@
 	struct i2c_settings_list *i2c_list = NULL;
 
 	if (i2c_list == NULL) {
-		pr_err("%s:%d Invalid list ptr\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid list ptr");
 		return -EINVAL;
 	}
 
@@ -109,8 +105,7 @@
 			sizeof(
 			struct cam_cmd_unconditional_wait);
 	} else {
-		pr_err("%s: %d Error: Delay Rxed Before any buffer: %d\n",
-			__func__, __LINE__, offset);
+		CAM_ERR(CAM_SENSOR, "Delay Rxed Before any buffer: %d", offset);
 		return -EINVAL;
 	}
 
@@ -131,8 +126,7 @@
 	i2c_list =
 		cam_sensor_get_i2c_ptr(i2c_reg_settings, 1);
 	if (!i2c_list || !i2c_list->i2c_settings.reg_setting) {
-		pr_err("%s: %d Failed in allocating mem for list\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Failed in allocating mem for list");
 		return -ENOMEM;
 	}
 
@@ -171,8 +165,7 @@
 		cam_cmd_i2c_random_wr->header.count);
 	if (i2c_list == NULL ||
 		i2c_list->i2c_settings.reg_setting == NULL) {
-		pr_err("%s: %d Failed in allocating i2c_list\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
 		return -ENOMEM;
 	}
 
@@ -234,8 +227,8 @@
 		 * be spread across multiple cmd buffers
 		 */
 
-		CDBG("%s:%d Total cmd Buf in Bytes: %d\n", __func__,
-			__LINE__, cmd_desc[i].length);
+		CAM_DBG(CAM_SENSOR, "Total cmd Buf in Bytes: %d",
+			cmd_desc[i].length);
 
 		if (!cmd_desc[i].length)
 			continue;
@@ -244,10 +237,9 @@
 			(uint64_t *)&generic_ptr, &len_of_buff);
 		cmd_buf = (uint32_t *)generic_ptr;
 		if (rc < 0) {
-			pr_err("%s:%d Failed in getting cmd hdl: %d Err: %d Buffer Len: %ld\n",
-				__func__, __LINE__,
-				cmd_desc[i].mem_handle, rc,
-				len_of_buff);
+			CAM_ERR(CAM_SENSOR,
+				"cmd hdl failed:%d, Err: %d, Buffer_len: %ld",
+				cmd_desc[i].mem_handle, rc, len_of_buff);
 			return rc;
 		}
 		cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
@@ -267,8 +259,8 @@
 					i2c_reg_settings,
 					&cmd_length_in_bytes, &j, &list);
 				if (rc < 0) {
-					pr_err("%s:%d :Error: Failed in random read %d\n",
-						__func__, __LINE__, rc);
+					CAM_ERR(CAM_SENSOR,
+						"Failed in random read %d", rc);
 					return rc;
 				}
 
@@ -288,8 +280,9 @@
 						i2c_reg_settings, j, &byte_cnt,
 						list);
 					if (rc < 0) {
-						pr_err("%s:%d :Error: Failed in handling delay %d\n",
-							__func__, __LINE__, rc);
+						CAM_ERR(CAM_SENSOR,
+							"delay hdl failed: %d",
+							rc);
 						return rc;
 					}
 
@@ -299,21 +292,22 @@
 						&cmd_buf, i2c_reg_settings,
 						&byte_cnt, &j, &list);
 					if (rc < 0) {
-						pr_err("%s:%d :Error: Failed in random read %d\n",
-							__func__, __LINE__, rc);
+						CAM_ERR(CAM_SENSOR,
+							"Random read fail: %d",
+							rc);
 						return rc;
 					}
 				} else {
-					pr_err("%s: %d Wrong Wait Command: %d\n",
-						__func__, __LINE__,
+					CAM_ERR(CAM_SENSOR,
+						"Wrong Wait Command: %d",
 						generic_op_code);
 					return -EINVAL;
 				}
 				break;
 			}
 			default:
-				pr_err("%s:%d Invalid Command Type:%d\n",
-					__func__, __LINE__, cmm_hdr->cmd_type);
+				CAM_ERR(CAM_SENSOR, "Invalid Command Type:%d",
+					 cmm_hdr->cmd_type);
 				return -EINVAL;
 			}
 		}
@@ -333,20 +327,18 @@
 
 	/* Validate input parameters */
 	if (!soc_info || !power_setting) {
-		pr_err("%s:%d failed: soc_info %pK power_setting %pK", __func__,
-			__LINE__,  soc_info, power_setting);
+		CAM_ERR(CAM_SENSOR, "failed: soc_info %pK power_setting %pK",
+			soc_info, power_setting);
 		return -EINVAL;
 	}
 
 	num_vreg = soc_info->num_rgltr;
 
 	if (num_vreg <= 0) {
-		pr_err("%s:%d failed: num_vreg %d", __func__, __LINE__,
-			num_vreg);
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
 		return -EINVAL;
 	}
 
-
 	for (i = 0; i < power_setting_size; i++) {
 		switch (power_setting[i].seq_type) {
 		case SENSOR_VDIG:
@@ -354,8 +346,8 @@
 				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_vdig")) {
 
-					CDBG("%s:%d i %d j %d cam_vdig\n",
-						__func__, __LINE__, i, j);
+					CAM_DBG(CAM_SENSOR,
+						"i: %d j: %d cam_vdig", i, j);
 					power_setting[i].seq_val = j;
 
 					if (VALIDATE_VOLTAGE(
@@ -378,8 +370,8 @@
 
 				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_vio")) {
-					CDBG("%s:%d i %d j %d cam_vio\n",
-						__func__, __LINE__, i, j);
+					CAM_DBG(CAM_SENSOR,
+						"i: %d j: %d cam_vio", i, j);
 					power_setting[i].seq_val = j;
 
 					if (VALIDATE_VOLTAGE(
@@ -403,8 +395,8 @@
 
 				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_vana")) {
-					CDBG("%s:%d i %d j %d cam_vana\n",
-						__func__, __LINE__, i, j);
+					CAM_DBG(CAM_SENSOR,
+						"i: %d j: %d cam_vana", i, j);
 					power_setting[i].seq_val = j;
 
 					if (VALIDATE_VOLTAGE(
@@ -428,8 +420,8 @@
 
 				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_vaf")) {
-					CDBG("%s:%d i %d j %d cam_vaf\n",
-						__func__, __LINE__, i, j);
+					CAM_DBG(CAM_SENSOR,
+						"i: %d j: %d cam_vaf", i, j);
 					power_setting[i].seq_val = j;
 
 					if (VALIDATE_VOLTAGE(
@@ -454,8 +446,8 @@
 
 				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_v_custom1")) {
-					CDBG("%s:%d i %d j %d cam_vcustom1\n",
-						__func__, __LINE__, i, j);
+					CAM_DBG(CAM_SENSOR,
+						"i:%d j:%d cam_vcustom1", i, j);
 					power_setting[i].seq_val = j;
 
 					if (VALIDATE_VOLTAGE(
@@ -478,8 +470,8 @@
 
 				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_v_custom2")) {
-					CDBG("%s:%d i %d j %d cam_vcustom2\n",
-						__func__, __LINE__, i, j);
+					CAM_DBG(CAM_SENSOR,
+						"i:%d j:%d cam_vcustom2", i, j);
 					power_setting[i].seq_val = j;
 
 					if (VALIDATE_VOLTAGE(
@@ -498,8 +490,8 @@
 			break;
 
 		default: {
-			pr_err("%s:%d invalid seq_val %d\n", __func__,
-				__LINE__, power_setting[i].seq_val);
+			CAM_ERR(CAM_SENSOR, "invalid seq_val %d",
+				power_setting[i].seq_val);
 			break;
 			}
 		}
@@ -520,19 +512,19 @@
 	size = gpio_conf->cam_gpio_req_tbl_size;
 
 	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
-		pr_info("%s:%d No GPIO entry\n", __func__, __LINE__);
+		CAM_INFO(CAM_SENSOR, "No GPIO entry");
 		return 0;
 	}
 
 	if (!gpio_tbl || !size) {
-		pr_err("%s:%d invalid gpio_tbl %pK / size %d\n", __func__,
-			__LINE__, gpio_tbl, size);
+		CAM_ERR(CAM_SENSOR, "invalid gpio_tbl %pK / size %d",
+			gpio_tbl, size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < size; i++) {
-		CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
-				gpio_tbl[i].gpio, gpio_tbl[i].flags);
+		CAM_DBG(CAM_SENSOR, "i: %d, gpio %d dir %ld",  i,
+			gpio_tbl[i].gpio, gpio_tbl[i].flags);
 	}
 
 	if (gpio_en) {
@@ -545,8 +537,7 @@
 				 * apply new gpios, outout a error message
 				 * for driver bringup debug
 				 */
-				pr_err("%s:%d gpio %d:%s request fails\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_SENSOR, "gpio %d:%s request fails",
 					gpio_tbl[i].gpio, gpio_tbl[i].label);
 			}
 		}
@@ -557,6 +548,308 @@
 	return rc;
 }
 
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+	int cmd_length, struct cam_sensor_power_ctrl_t *power_info)
+{
+	int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
+	int32_t i = 0, pwr_up = 0, pwr_down = 0;
+	void *ptr = cmd_buf, *scr;
+	struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
+	struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
+
+	if (!pwr_cmd || !cmd_length) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args: pwr_cmd %pK, cmd_length: %d",
+			pwr_cmd, cmd_length);
+		return -EINVAL;
+	}
+
+	power_info->power_setting_size = 0;
+	power_info->power_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_down_setting_size = 0;
+	power_info->power_down_setting =
+		(struct cam_sensor_power_setting *)
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
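+	/*
+	 * Walk the command buffer one header at a time: PWR_UP and PWR_DOWN
+	 * commands append entries to the power_setting/power_down_setting
+	 * tables, while unconditional WAIT commands fold their delay into
+	 * the most recently parsed entry of the current sequence.
+	 */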
+	while (tot_size < cmd_length) {
+		if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			power_info->
+				power_setting_size +=
+				pwr_cmd->count;
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+
+			if (pwr_cmd->count == 0)
+				CAM_DBG(CAM_SENSOR, "Unexpected command");
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
+				power_info->
+					power_setting[pwr_up].seq_type =
+					pwr_cmd->power_settings[i].
+						power_seq_type;
+				power_info->
+					power_setting[pwr_up].config_val =
+					pwr_cmd->power_settings[i].
+						config_val_low;
+				power_info->power_setting[pwr_up].delay = 0;
+				if (i) {
+					scr = scr +
+						sizeof(
+						struct cam_power_settings);
+					tot_size = tot_size +
+						sizeof(
+						struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					CAM_ERR(CAM_SENSOR,
+						"Error: Cmd Buffer is wrong");
+					rc = -EINVAL;
+					goto free_power_down_settings;
+				}
+				CAM_DBG(CAM_SENSOR,
+					"Seq Type[%d]: %d Config_val: %ld",
+					pwr_up,
+					power_info->
+						power_setting[pwr_up].seq_type,
+					power_info->
+						power_setting[pwr_up].
+						config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
+			struct cam_cmd_unconditional_wait *wait_cmd =
+				(struct cam_cmd_unconditional_wait *)ptr;
+			if (wait_cmd->op_code ==
+				CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+				if (last_cmd_type ==
+					CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+					if (pwr_up > 0)
+						power_info->
+							power_setting
+							[pwr_up - 1].delay +=
+							wait_cmd->delay;
+					else
+						CAM_ERR(CAM_SENSOR,
+							"Delay is expected only after valid power up setting");
+				} else if (last_cmd_type ==
+					CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+					if (pwr_down > 0)
+						power_info->
+							power_down_setting
+							[pwr_down - 1].delay +=
+							wait_cmd->delay;
+					else
+						CAM_ERR(CAM_SENSOR,
+							"Delay is expected only after valid power down setting");
+				}
+			} else
+				CAM_DBG(CAM_SENSOR, "Invalid op code: %d",
+					wait_cmd->op_code);
+			tot_size = tot_size +
+				sizeof(struct cam_cmd_unconditional_wait);
+			if (tot_size > cmd_length) {
+				CAM_ERR(CAM_SENSOR, "Command Buffer is wrong");
+				rc = -EINVAL;
+				goto free_power_down_settings;
+			}
+			scr = (void *) (wait_cmd);
+			ptr = (void *)
+				(scr +
+				sizeof(struct cam_cmd_unconditional_wait));
+			CAM_DBG(CAM_SENSOR, "ptr: %pK sizeof: %d Next: %pK",
+				scr, (int32_t)sizeof(
+				struct cam_cmd_unconditional_wait), ptr);
+
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+			power_info->power_down_setting_size += pwr_cmd->count;
+
+			if (pwr_cmd->count == 0)
+				CAM_ERR(CAM_SENSOR, "Invalid Command");
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
+				power_info->
+					power_down_setting[pwr_down].
+					seq_type =
+					pwr_cmd->power_settings[i].
+					power_seq_type;
+				power_info->
+					power_down_setting[pwr_down].
+					config_val =
+					pwr_cmd->power_settings[i].
+					config_val_low;
+				power_info->
+					power_down_setting[pwr_down].delay = 0;
+				if (i) {
+					scr = scr +
+						sizeof(
+						struct cam_power_settings);
+					tot_size =
+						tot_size +
+						sizeof(
+						struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					CAM_ERR(CAM_SENSOR,
+						"Command Buffer is wrong");
+					rc = -EINVAL;
+					goto free_power_down_settings;
+				}
+				CAM_DBG(CAM_SENSOR,
+					"Seq Type[%d]: %d Config_val: %ld",
+					pwr_down,
+					power_info->
+						power_down_setting[pwr_down].
+						seq_type,
+					power_info->
+						power_down_setting[pwr_down].
+						config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else {
+			CAM_ERR(CAM_SENSOR,
+				"Error: Unexpected header type: %d",
+				cmm_hdr->cmd_type);
+		}
+	}
+
+	return rc;
+free_power_down_settings:
+	kfree(power_info->power_down_setting);
+free_power_settings:
+	kfree(power_info->power_setting);
+	return rc;
+}
+
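+/*
+ * cam_get_dt_power_setting_data - build the sensor power sequence from DT.
+ *
+ * Reads "qcom,cam-power-seq-type", "qcom,cam-power-seq-cfg-val" and
+ * "qcom,cam-power-seq-delay" into power_info->power_setting, then derives
+ * the power-down table as the power-up table in reverse order.
+ */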
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0, i;
+	int count = 0;
+	const char *seq_name = NULL;
+	uint32_t *array = NULL;
+	struct cam_sensor_power_setting *ps;
+	int c, end;
+
+	if (!power_info)
+		return -EINVAL;
+
+	count = of_property_count_strings(of_node, "qcom,cam-power-seq-type");
+	power_info->power_setting_size = count;
+
+	CAM_DBG(CAM_SENSOR, "qcom,cam-power-seq-type count %d", count);
+
+	if (count <= 0)
+		return 0;
+
+	ps = kcalloc(count, sizeof(*ps), GFP_KERNEL);
+	if (!ps)
+		return -ENOMEM;
+	power_info->power_setting = ps;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,cam-power-seq-type", i, &seq_name);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "failed to read seq-type index %d", i);
+			goto ERROR1;
+		}
+		CAM_DBG(CAM_SENSOR, "seq_name[%d] = %s", i, seq_name);
+		if (!strcmp(seq_name, "cam_vio")) {
+			ps[i].seq_type = SENSOR_VIO;
+		} else if (!strcmp(seq_name, "cam_vana")) {
+			ps[i].seq_type = SENSOR_VANA;
+		} else if (!strcmp(seq_name, "cam_clk")) {
+			ps[i].seq_type = SENSOR_MCLK;
+		} else {
+			CAM_ERR(CAM_SENSOR, "unrecognized seq-type %s",
+				seq_name);
+			rc = -EILSEQ;
+			goto ERROR1;
+		}
+		CAM_DBG(CAM_SENSOR, "seq_type[%d] %d", i, ps[i].seq_type);
+	}
+
+	array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!array) {
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-cfg-val",
+		array, count);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed to read power-seq-cfg-val");
+		goto ERROR2;
+	}
+
+	for (i = 0; i < count; i++) {
+		ps[i].config_val = array[i];
+		CAM_DBG(CAM_SENSOR, "power_setting[%d].config_val = %ld", i,
+			ps[i].config_val);
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-delay",
+		array, count);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed to read power-seq-delay");
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		ps[i].delay = array[i];
+		CAM_DBG(CAM_SENSOR, "power_setting[%d].delay = %d", i,
+			ps[i].delay);
+	}
+	kfree(array);
+
+	power_info->power_down_setting =
+		kzalloc(sizeof(*ps) * count, GFP_KERNEL);
+
+	if (!power_info->power_down_setting) {
+		CAM_ERR(CAM_SENSOR, "failed");
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	power_info->power_down_setting_size = count;
+
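+	/* Power-down sequence is the power-up sequence applied in reverse */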
+	end = count - 1;
+
+	for (c = 0; c < count; c++) {
+		power_info->power_down_setting[c] = ps[end];
+		end--;
+	}
+	return rc;
+ERROR2:
+	kfree(array);
+ERROR1:
+	kfree(ps);
+	return rc;
+}
 
 int cam_sensor_util_init_gpio_pin_tbl(
 	struct cam_hw_soc_info *soc_info,
@@ -574,22 +867,19 @@
 
 	gconf = soc_info->gpio_data;
 	if (!gconf) {
-		pr_err("%s:%d No gpio_common_table is found\n",
-			 __func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "No gpio_common_table is found");
 		return -EINVAL;
 	}
 
 	if (!gconf->cam_gpio_common_tbl) {
-		pr_err("%s:%d gpio_common_table is not initialized\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "gpio_common_table is not initialized");
 		return -EINVAL;
 	}
 
 	gpio_array_size = gconf->cam_gpio_common_tbl_size;
 
 	if (!gpio_array_size) {
-		pr_err("%s:%d invalid size of gpio table\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "invalid size of gpio table");
 		return -EINVAL;
 	}
 
@@ -602,12 +892,10 @@
 	rc = of_property_read_u32(of_node, "gpio-vana", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-vana failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "read gpio-vana failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-vana invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-vana invalid %d", val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
@@ -615,38 +903,34 @@
 				gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_VANA] = 1;
 
-		CDBG("%s:%d gpio-vana %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-vana %d",
 			gpio_num_info->gpio_num[SENSOR_VANA]);
 	}
 
 	rc = of_property_read_u32(of_node, "gpio-vio", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-vio failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "read gpio-vio failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-vio invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-vio invalid %d", val);
 			goto free_gpio_info;
 		}
 		gpio_num_info->gpio_num[SENSOR_VIO] =
 			gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_VIO] = 1;
 
-		CDBG("%s:%d gpio-vio %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-vio %d",
 			gpio_num_info->gpio_num[SENSOR_VIO]);
 	}
 
 	rc = of_property_read_u32(of_node, "gpio-vaf", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-vaf failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "read gpio-vaf failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-vaf invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-vaf invalid %d", val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
@@ -654,19 +938,17 @@
 			gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_VAF] = 1;
 
-		CDBG("%s:%d gpio-vaf %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-vaf %d",
 			gpio_num_info->gpio_num[SENSOR_VAF]);
 	}
 
 	rc = of_property_read_u32(of_node, "gpio-vdig", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-vdig failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "read gpio-vdig failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-vdig invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-vdig invalid %d", val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
@@ -674,19 +956,17 @@
 			gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_VDIG] = 1;
 
-		CDBG("%s:%d gpio-vdig %d\n", __func__, __LINE__,
-			gpio_num_info->gpio_num[SENSOR_VDIG]);
+		CAM_DBG(CAM_SENSOR, "gpio-vdig %d",
+			gpio_num_info->gpio_num[SENSOR_VDIG]);
 	}
 
 	rc = of_property_read_u32(of_node, "gpio-reset", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-reset failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR, "read gpio-reset failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-reset invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-reset invalid %d", val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
@@ -694,19 +974,18 @@
 			gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_RESET] = 1;
 
-		CDBG("%s:%d gpio-reset %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-reset %d",
 			gpio_num_info->gpio_num[SENSOR_RESET]);
 	}
 
 	rc = of_property_read_u32(of_node, "gpio-standby", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-standby failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-standby failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-standby invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-standby invalid %d", val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
@@ -714,19 +993,18 @@
 			gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_STANDBY] = 1;
 
-		CDBG("%s:%d gpio-standby %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-standby %d",
 			gpio_num_info->gpio_num[SENSOR_STANDBY]);
 	}
 
 	rc = of_property_read_u32(of_node, "gpio-af-pwdm", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-af-pwdm failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-af-pwdm failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-af-pwdm invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-af-pwdm invalid %d", val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
@@ -734,19 +1012,18 @@
 			gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
 
-		CDBG("%s:%d gpio-af-pwdm %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-af-pwdm %d",
 			gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
 	}
 
 	rc = of_property_read_u32(of_node, "gpio-custom1", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-custom1 failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-custom1 failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-custom1 invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-custom1 invalid %d", val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
@@ -754,19 +1031,18 @@
 			gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
 
-		CDBG("%s:%d gpio-custom1 %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-custom1 %d",
 			gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
 	}
 
 	rc = of_property_read_u32(of_node, "gpio-custom2", &val);
 	if (rc != -EINVAL) {
 		if (rc < 0) {
-			pr_err("%s:%d read gpio-custom2 failed rc %d\n",
-				__func__, __LINE__, rc);
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-custom2 failed rc %d", rc);
 			goto free_gpio_info;
 		} else if (val >= gpio_array_size) {
-			pr_err("%s:%d gpio-custom2 invalid %d\n",
-				__func__, __LINE__, val);
+			CAM_ERR(CAM_SENSOR, "gpio-custom2 invalid %d", val);
 			rc = -EINVAL;
 			goto free_gpio_info;
 		}
@@ -774,7 +1050,7 @@
 			gconf->cam_gpio_common_tbl[val].gpio;
 		gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
 
-		CDBG("%s:%d gpio-custom2 %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "gpio-custom2 %d",
 			gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
 	} else {
 		rc = 0;
@@ -793,24 +1069,23 @@
 
 	sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
 	if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
-		pr_err("%s:%d Getting pinctrl handle failed\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Getting pinctrl handle failed");
 		return -EINVAL;
 	}
 	sensor_pctrl->gpio_state_active =
 		pinctrl_lookup_state(sensor_pctrl->pinctrl,
 				CAM_SENSOR_PINCTRL_STATE_DEFAULT);
 	if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) {
-		pr_err("%s:%d Failed to get the active state pinctrl handle\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR,
+			"Failed to get the active state pinctrl handle");
 		return -EINVAL;
 	}
 	sensor_pctrl->gpio_state_suspend
 		= pinctrl_lookup_state(sensor_pctrl->pinctrl,
 				CAM_SENSOR_PINCTRL_STATE_SLEEP);
 	if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) {
-		pr_err("%s:%d Failed to get the suspend state pinctrl handle\n",
-				__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR,
+			"Failed to get the suspend state pinctrl handle");
 		return -EINVAL;
 	}
 	return 0;
@@ -821,19 +1096,17 @@
 	int gpio_offset = -1;
 
 	if (!gpio_num_info) {
-		pr_err("%s:%d Input Parameters are not proper\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Input Parameters are not proper");
 		return -EINVAL;
 	}
 
-	CDBG("%s: %d Seq type: %d, config: %d", __func__, __LINE__,
-		seq_type, val);
+	CAM_DBG(CAM_SENSOR, "Seq type: %d, config: %d", seq_type, val);
 
 	gpio_offset = seq_type;
 
 	if (gpio_num_info->valid[gpio_offset] == 1) {
-		CDBG("%s: %d VALID GPIO offset: %d, seqtype: %d\n",
-			__func__, __LINE__, gpio_offset, seq_type);
+		CAM_DBG(CAM_SENSOR, "VALID GPIO offset: %d, seqtype: %d",
+			 gpio_offset, seq_type);
 		gpio_set_value_cansleep(
 			gpio_num_info->gpio_num
 			[gpio_offset], val);
@@ -850,9 +1123,9 @@
 	struct cam_sensor_power_setting *power_setting = NULL;
 	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
-	CDBG("%s:%d\n", __func__, __LINE__);
+	CAM_DBG(CAM_SENSOR, "Enter");
 	if (!ctrl) {
-		pr_err("%s:%d Invalid ctrl handle\n", __func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Invalid ctrl handle");
 		return -EINVAL;
 	}
 
@@ -860,15 +1133,13 @@
 	num_vreg = soc_info->num_rgltr;
 
 	if ((num_vreg == 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
-		pr_err("%s:%d Regulators are not initialized\n",
-			__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Regulators are not initialized");
 		return -EINVAL;
 	}
 
 	ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
 	if (ret < 0) {
-		pr_err("%s:%d Initialization of pinctrl failed\n",
-				__func__, __LINE__);
+		CAM_ERR(CAM_SENSOR, "Initialization of pinctrl failed");
 		ctrl->cam_pinctrl_status = 0;
 	} else {
 		ctrl->cam_pinctrl_status = 1;
@@ -882,19 +1153,18 @@
 		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
 			ctrl->pinctrl_info.gpio_state_active);
 		if (ret)
-			pr_err("%s:%d cannot set pin to active state",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "cannot set pin to active state");
 	}
 
 	for (index = 0; index < ctrl->power_setting_size; index++) {
-		CDBG("%s:%d index %d\n", __func__, __LINE__, index);
+		CAM_DBG(CAM_SENSOR, "index: %d",  index);
 		power_setting = &ctrl->power_setting[index];
+		CAM_DBG(CAM_SENSOR, "seq_type %d", power_setting->seq_type);
 
 		switch (power_setting->seq_type) {
 		case SENSOR_MCLK:
 			if (power_setting->seq_val >= soc_info->num_clk) {
-				pr_err("%s:%d :Error: clk index %d >= max %u\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_SENSOR, "clk index %d >= max %u",
 					power_setting->seq_val,
 					soc_info->num_clk);
 				goto power_up_failed;
@@ -902,8 +1172,8 @@
 			for (j = 0; j < num_vreg; j++) {
 				if (!strcmp(soc_info->rgltr_name[j],
 					"cam_clk")) {
-					CDBG("%s:%d Enable cam_clk: %d\n",
-						__func__, __LINE__, j);
+					CAM_DBG(CAM_SENSOR,
+						"Enable cam_clk: %d", j);
 
 					soc_info->rgltr[j] =
 					regulator_get(
@@ -915,8 +1185,8 @@
 						rc = PTR_ERR(
 							soc_info->rgltr[j]);
 						rc = rc ? rc : -EINVAL;
-						pr_err("%s:%d :vreg %s %d\n",
-							__func__, __LINE__,
+						CAM_ERR(CAM_SENSOR,
+							"vreg %s %d",
 							soc_info->rgltr_name[j],
 							rc);
 						soc_info->rgltr[j] = NULL;
@@ -947,8 +1217,7 @@
 			}
 
 			if (rc < 0) {
-				pr_err("%s:%d clk enable failed\n", __func__,
-					__LINE__);
+				CAM_ERR(CAM_SENSOR, "clk enable failed");
 				goto power_up_failed;
 			}
 			break;
@@ -957,20 +1226,17 @@
 		case SENSOR_CUSTOM_GPIO1:
 		case SENSOR_CUSTOM_GPIO2:
 			if (no_gpio) {
-				pr_err("%s:%d request gpio failed\n", __func__,
-					__LINE__);
+				CAM_ERR(CAM_SENSOR, "request gpio failed");
 				return no_gpio;
 			}
 			if (power_setting->seq_val >= CAM_VREG_MAX ||
 				!gpio_num_info) {
-				pr_err("%s:%d gpio index %d >= max %d\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_SENSOR, "gpio index %d >= max %d",
 					power_setting->seq_val,
 					CAM_VREG_MAX);
 				goto power_up_failed;
 			}
-			CDBG("%s:%d gpio set val %d\n",
-				__func__, __LINE__,
+			CAM_DBG(CAM_SENSOR, "gpio set val %d",
 				gpio_num_info->gpio_num
 				[power_setting->seq_val]);
 
@@ -978,8 +1244,8 @@
 				power_setting->seq_type,
 				gpio_num_info, 1);
 			if (rc < 0) {
-				pr_err("%s:%d Error in handling VREG GPIO\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_SENSOR,
+					"Error in handling VREG GPIO");
 				goto power_up_failed;
 			}
 			break;
@@ -994,15 +1260,13 @@
 				break;
 
 			if (power_setting->seq_val >= CAM_VREG_MAX) {
-				pr_err("%s:%d vreg index %d >= max %d\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_SENSOR, "vreg index %d >= max %d",
 					power_setting->seq_val,
 					CAM_VREG_MAX);
 				goto power_up_failed;
 			}
 			if (power_setting->seq_val < num_vreg) {
-				CDBG("%s:%d Enable Regulator\n",
-					__func__, __LINE__);
+				CAM_DBG(CAM_SENSOR, "Enable Regulator");
 				vreg_idx = power_setting->seq_val;
 
 				soc_info->rgltr[vreg_idx] =
@@ -1013,8 +1277,7 @@
 					rc = PTR_ERR(soc_info->rgltr[vreg_idx]);
 					rc = rc ? rc : -EINVAL;
 
-					pr_err("%s:%d, %s get failed %d\n",
-						__func__, __LINE__,
+					CAM_ERR(CAM_SENSOR, "%s get failed %d",
 						soc_info->rgltr_name[vreg_idx],
 						rc);
 
@@ -1033,22 +1296,21 @@
 						soc_info->rgltr[vreg_idx];
 			}
 			else
-				pr_err("%s: %d usr_idx:%d dts_idx:%d\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_SENSOR, "usr_idx:%d dts_idx:%d",
 					power_setting->seq_val, num_vreg);
 
 			rc = msm_cam_sensor_handle_reg_gpio(
 				power_setting->seq_type,
 				gpio_num_info, 1);
 			if (rc < 0) {
-				pr_err("%s:%d Error in handling VREG GPIO\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_SENSOR,
+					"Error in handling VREG GPIO");
 				goto power_up_failed;
 			}
 			break;
 		default:
-			pr_err("%s:%d error power seq type %d\n", __func__,
-				__LINE__, power_setting->seq_type);
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				power_setting->seq_type);
 			break;
 		}
 		if (power_setting->delay > 20)
@@ -1060,11 +1322,11 @@
 
 	return 0;
 power_up_failed:
-	pr_err("%s:%d failed\n", __func__, __LINE__);
+	CAM_ERR(CAM_SENSOR, "failed");
 	for (index--; index >= 0; index--) {
-		CDBG("%s:%d index %d\n", __func__, __LINE__, index);
+		CAM_DBG(CAM_SENSOR, "index %d",  index);
 		power_setting = &ctrl->power_setting[index];
-		CDBG("%s:%d type %d\n", __func__, __LINE__,
+		CAM_DBG(CAM_SENSOR, "type %d",
 			power_setting->seq_type);
 		switch (power_setting->seq_type) {
 		case SENSOR_RESET:
@@ -1088,8 +1350,7 @@
 		case SENSOR_CUSTOM_REG1:
 		case SENSOR_CUSTOM_REG2:
 			if (power_setting->seq_val < num_vreg) {
-				CDBG("%s:%d Disable Regulator\n",
-					__func__, __LINE__);
+				CAM_DBG(CAM_SENSOR, "Disable Regulator");
 				vreg_idx = power_setting->seq_val;
 
 				rc =  cam_soc_util_regulator_disable(
@@ -1105,8 +1366,7 @@
 
 			}
 			else
-				pr_err("%s:%d:seq_val: %d > num_vreg: %d\n",
-					__func__, __LINE__,
+				CAM_ERR(CAM_SENSOR, "seq_val:%d > num_vreg: %d",
 					power_setting->seq_val, num_vreg);
 
 			msm_cam_sensor_handle_reg_gpio(power_setting->seq_type,
@@ -1114,8 +1374,8 @@
 
 			break;
 		default:
-			pr_err("%s:%d error power seq type %d\n", __func__,
-				__LINE__, power_setting->seq_type);
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				power_setting->seq_type);
 			break;
 		}
 		if (power_setting->delay > 20) {
@@ -1129,8 +1389,7 @@
 		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
 				ctrl->pinctrl_info.gpio_state_suspend);
 		if (ret)
-			pr_err("%s:%d cannot set pin to suspend state\n",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
 		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
 	}
 	ctrl->cam_pinctrl_status = 0;
@@ -1186,8 +1445,7 @@
 			}
 
 			if (ps != NULL) {
-				CDBG("%s:%d Disable Regulator\n",
-					__func__, __LINE__);
+				CAM_DBG(CAM_SENSOR, "Disable Regulator");
 
 				rc = cam_soc_util_regulator_disable(
 					soc_info->rgltr[j],
@@ -1214,9 +1472,9 @@
 	struct cam_sensor_power_setting *ps;
 	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
 
-	CDBG("%s:%d\n", __func__, __LINE__);
+	CAM_DBG(CAM_SENSOR, "Enter");
 	if (!ctrl || !soc_info) {
-		pr_err("%s:%d failed ctrl %pK\n", __func__, __LINE__, ctrl);
+		CAM_ERR(CAM_SENSOR, "failed ctrl %pK",  ctrl);
 		return -EINVAL;
 	}
 
@@ -1224,16 +1482,16 @@
 	num_vreg = soc_info->num_rgltr;
 
 	for (index = 0; index < ctrl->power_down_setting_size; index++) {
-		CDBG("%s:%d index %d\n", __func__, __LINE__, index);
+		CAM_DBG(CAM_SENSOR, "index %d",  index);
 		pd = &ctrl->power_down_setting[index];
 		ps = NULL;
-		CDBG("%s:%d type %d\n", __func__, __LINE__, pd->seq_type);
+		CAM_DBG(CAM_SENSOR, "type %d",  pd->seq_type);
 		switch (pd->seq_type) {
 		case SENSOR_MCLK:
 			ret = cam_config_mclk_reg(ctrl, soc_info, index);
 			if (ret < 0) {
-				pr_err("%s:%d :Error: in config clk reg\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_SENSOR,
+					"config clk reg failed rc: %d", ret);
 				return ret;
 			}
 			//cam_soc_util_clk_disable_default(soc_info);
@@ -1272,8 +1530,8 @@
 				pd->seq_val);
 			if (ps) {
 				if (pd->seq_val < num_vreg) {
-					CDBG("%s:%d Disable Regulator\n",
-						__func__, __LINE__);
+					CAM_DBG(CAM_SENSOR,
+						"Disable Regulator");
 					ret =  cam_soc_util_regulator_disable(
 					soc_info->rgltr[ps->seq_val],
 					soc_info->rgltr_name[ps->seq_val],
@@ -1286,23 +1544,24 @@
 						soc_info->rgltr[ps->seq_val];
 				}
 				else
-					pr_err("%s:%d:seq_val:%d > num_vreg: %d\n",
-						__func__, __LINE__, pd->seq_val,
+					CAM_ERR(CAM_SENSOR,
+						"seq_val:%d > num_vreg: %d",
+						 pd->seq_val,
 						num_vreg);
 			} else
-				pr_err("%s:%d error in power up/down seq\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_SENSOR,
+					"error in power up/down seq");
 
 			ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type,
 				gpio_num_info, GPIOF_OUT_INIT_LOW);
 
 			if (ret < 0)
-				pr_err("%s:%d Error disabling VREG GPIO\n",
-					__func__, __LINE__);
+				CAM_ERR(CAM_SENSOR,
+					"Error disabling VREG GPIO");
 			break;
 		default:
-			pr_err("%s:%d error power seq type %d\n", __func__,
-				__LINE__, pd->seq_type);
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				pd->seq_type);
 			break;
 		}
 		if (pd->delay > 20)
@@ -1316,8 +1575,7 @@
 		ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
 				ctrl->pinctrl_info.gpio_state_suspend);
 		if (ret)
-			pr_err("%s:%d cannot set pin to suspend state",
-				__func__, __LINE__);
+			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
 		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
index 912f06b..8a26369 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -23,11 +23,12 @@
 #include <cam_req_mgr_interface.h>
 #include <cam_mem_mgr.h>
 #include "cam_soc_util.h"
+#include "cam_debug_util.h"
 
 #define INVALID_VREG 100
 
-int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
-	struct camera_vreg_t *cam_vreg, int num_vreg,
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+	struct cam_hw_soc_info *soc_info,
 	struct cam_sensor_power_ctrl_t *power_info);
 
 int msm_camera_pinctrl_init
@@ -52,4 +53,7 @@
 int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
 	struct cam_sensor_power_setting *power_setting,
 	uint16_t power_setting_size);
+
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+	int cmd_length, struct cam_sensor_power_ctrl_t *power_info);
 #endif /* _CAM_SENSOR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
index 3619da7..e17dac6 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/Makefile
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -1 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index ca0dfac..ff7a0e5 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__
-
 #include <linux/module.h>
 #include <linux/dma-buf.h>
 #include <asm/dma-iommu.h>
@@ -25,6 +23,7 @@
 #include <linux/genalloc.h>
 
 #include "cam_smmu_api.h"
+#include "cam_debug_util.h"
 
 #define SHARED_MEM_POOL_GRANULARITY 12
 
@@ -39,12 +38,6 @@
 #define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
 #define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
 
-#ifdef CONFIG_CAM_SMMU_DBG
-#define CDBG(fmt, args...) pr_err(fmt, ##args)
-#else
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
-#endif
-
 struct firmware_alloc_info {
 	struct device *fw_dev;
 	void *fw_kva;
@@ -223,7 +216,7 @@
 
 	mutex_lock(&iommu_cb_set.payload_list_lock);
 	if (list_empty(&iommu_cb_set.payload_list)) {
-		pr_err("Payload list empty\n");
+		CAM_ERR(CAM_SMMU, "Payload list empty");
 		mutex_unlock(&iommu_cb_set.payload_list_lock);
 		return;
 	}
@@ -256,10 +249,11 @@
 {
 	struct cam_dma_buff_info *mapping;
 
-	pr_err("index = %d\n", idx);
+	CAM_ERR(CAM_SMMU, "index = %d", idx);
 	list_for_each_entry(mapping,
 		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
-		pr_err("ion_fd = %d, paddr= 0x%pK, len = %u, region = %d\n",
+		CAM_ERR(CAM_SMMU,
+			"ion_fd = %d, paddr= 0x%pK, len = %u, region = %d",
 			 mapping->ion_fd, (void *)mapping->paddr,
 			 (unsigned int)mapping->len,
 			 mapping->region_id);
@@ -271,10 +265,10 @@
 	int i;
 
 	for (i = 0; i < iommu_cb_set.cb_num; i++) {
-		pr_err("i= %d, handle= %d, name_addr=%pK\n", i,
+		CAM_ERR(CAM_SMMU, "i= %d, handle= %d, name_addr=%pK", i,
 			   (int)iommu_cb_set.cb_info[i].handle,
 			   (void *)iommu_cb_set.cb_info[i].name);
-		pr_err("dev = %pK\n", iommu_cb_set.cb_info[i].dev);
+		CAM_ERR(CAM_SMMU, "dev = %pK", iommu_cb_set.cb_info[i].dev);
 	}
 }
 
@@ -290,18 +284,21 @@
 		end_addr = (unsigned long)mapping->paddr + mapping->len;
 
 		if (start_addr <= current_addr && current_addr < end_addr) {
-			pr_err("va %pK valid: range:%pK-%pK, fd = %d cb: %s\n",
+			CAM_ERR(CAM_SMMU,
+				"va %pK valid: range:%pK-%pK, fd = %d cb: %s",
 				vaddr, (void *)start_addr, (void *)end_addr,
 				mapping->ion_fd,
 				iommu_cb_set.cb_info[idx].name);
 			goto end;
 		} else {
-			CDBG("va %pK is not in this range: %pK-%pK, fd = %d\n",
+			CAM_DBG(CAM_SMMU,
+				"va %pK is not in this range: %pK-%pK, fd = %d",
 				vaddr, (void *)start_addr, (void *)end_addr,
 				mapping->ion_fd);
 		}
 	}
-	pr_err("Cannot find vaddr:%pK in SMMU %s uses invalid virt address\n",
+	CAM_ERR(CAM_SMMU,
+		"Cannot find vaddr:%pK in SMMU %s uses invalid virt address",
 		vaddr, iommu_cb_set.cb_info[idx].name);
 end:
 	return;
@@ -315,20 +312,22 @@
 	int idx, i = 0;
 
 	if (!token || (handle == HANDLE_INIT)) {
-		pr_err("Error: token is NULL or invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
 		return;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return;
@@ -336,7 +335,8 @@
 
 	if (client_page_fault_handler) {
 		if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
-			pr_err("%s Should not regiester more handlers\n",
+			CAM_ERR(CAM_SMMU,
+				"%s Should not register more handlers",
 				iommu_cb_set.cb_info[idx].name);
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return;
@@ -361,7 +361,8 @@
 			}
 		}
 		if (i == CAM_SMMU_CB_MAX)
-			pr_err("Error: hdl %x no matching tokens: %s\n",
+			CAM_ERR(CAM_SMMU,
+				"Error: hdl %x no matching tokens: %s",
 				handle, iommu_cb_set.cb_info[idx].name);
 	}
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -376,9 +377,10 @@
 	struct cam_smmu_work_payload *payload;
 
 	if (!token) {
-		pr_err("Error: token is NULL\n");
-		pr_err("Error: domain = %pK, device = %pK\n", domain, dev);
-		pr_err("iova = %lX, flags = %d\n", iova, flags);
+		CAM_ERR(CAM_SMMU, "Error: token is NULL");
+		CAM_ERR(CAM_SMMU, "Error: domain = %pK, device = %pK",
+			domain, dev);
+		CAM_ERR(CAM_SMMU, "iova = %lX, flags = %d", iova, flags);
 		return 0;
 	}
 
@@ -390,7 +392,8 @@
 	}
 
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: index is not valid, index = %d, token = %s\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: index is not valid, index = %d, token = %s",
 			idx, cb_name);
 		return 0;
 	}
@@ -427,7 +430,7 @@
 		return IOMMU_READ|IOMMU_WRITE;
 	case CAM_SMMU_MAP_INVALID:
 	default:
-		pr_err("Error: Direction is invalid. dir = %d\n", dir);
+		CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d", dir);
 		break;
 	};
 	return IOMMU_INVALID_DIR;
@@ -445,7 +448,8 @@
 		return DMA_BIDIRECTIONAL;
 	case CAM_SMMU_MAP_INVALID:
 	default:
-		pr_err("Error: Direction is invalid. dir = %d\n", (int)dir);
+		CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d",
+			(int)dir);
 		break;
 	}
 	return DMA_NONE;
@@ -478,7 +482,8 @@
 	int i;
 
 	if (hdl == HANDLE_INIT) {
-		CDBG("iommu handle is init number. Need to try again\n");
+		CAM_DBG(CAM_SMMU,
+			"iommu handle is init number. Need to try again");
 		return 1;
 	}
 
@@ -487,7 +492,8 @@
 			continue;
 
 		if (iommu_cb_set.cb_info[i].handle == hdl) {
-			CDBG("iommu handle %d conflicts\n", (int)hdl);
+			CAM_DBG(CAM_SMMU, "iommu handle %d conflicts",
+				(int)hdl);
 			return 1;
 		}
 	}
@@ -503,7 +509,7 @@
 
 	get_random_bytes(&rand, COOKIE_NUM_BYTE);
 	hdl = GET_SMMU_HDL(idx, rand);
-	CDBG("create handle value = %x\n", (int)hdl);
+	CAM_DBG(CAM_SMMU, "create handle value = %x", (int)hdl);
 	return hdl;
 }
 
@@ -515,7 +521,8 @@
 	/* attach the mapping to device */
 	rc = arm_iommu_attach_device(cb->dev, cb->mapping);
 	if (rc < 0) {
-		pr_err("Error: ARM IOMMU attach failed. ret = %d\n", rc);
+		CAM_ERR(CAM_SMMU, "Error: ARM IOMMU attach failed. ret = %d",
+			rc);
 		rc = -ENODEV;
 	}
 
@@ -533,7 +540,8 @@
 		if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
 			mutex_lock(&iommu_cb_set.cb_info[i].lock);
 			if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
-				pr_err("Error: %s already got handle 0x%x\n",
+				CAM_ERR(CAM_SMMU,
+					"Error: %s already got handle 0x%x",
 					name,
 					iommu_cb_set.cb_info[i].handle);
 				mutex_unlock(&iommu_cb_set.cb_info[i].lock);
@@ -549,14 +557,15 @@
 			iommu_cb_set.cb_info[i].handle = handle;
 			iommu_cb_set.cb_info[i].cb_count = 0;
 			*hdl = handle;
-			CDBG("%s creates handle 0x%x\n", name, handle);
+			CAM_DBG(CAM_SMMU, "%s creates handle 0x%x",
+				name, handle);
 			mutex_unlock(&iommu_cb_set.cb_info[i].lock);
 			return 0;
 		}
 	}
 
-	pr_err("Error: Cannot find name %s or all handle exist!\n",
-			name);
+	CAM_ERR(CAM_SMMU, "Error: Cannot find name %s or all handle exist",
+		name);
 	cam_smmu_print_table();
 	return -EINVAL;
 }
@@ -571,7 +580,8 @@
 
 	if (!count) {
 		err = -EINVAL;
-		pr_err("Page count is zero, size passed = %zu\n", size);
+		CAM_ERR(CAM_SMMU, "Page count is zero, size passed = %zu",
+			size);
 		goto bail;
 	}
 
@@ -630,12 +640,12 @@
 			      (1 << mapping->order) - 1) >> mapping->order;
 
 	if (!addr) {
-		pr_err("Error: Invalid address\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid address");
 		return -EINVAL;
 	}
 
 	if (start + count > mapping->bits) {
-		pr_err("Error: Invalid page bits in scratch map\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid page bits in scratch map");
 		return -EINVAL;
 	}
 
@@ -657,13 +667,13 @@
 	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
 			list) {
 		if (mapping->paddr == virt_addr) {
-			CDBG("Found virtual address %lx\n",
+			CAM_DBG(CAM_SMMU, "Found virtual address %lx",
 				 (unsigned long)virt_addr);
 			return mapping;
 		}
 	}
 
-	pr_err("Error: Cannot find virtual address %lx by index %d\n",
+	CAM_ERR(CAM_SMMU, "Error: Cannot find virtual address %lx by index %d",
 		(unsigned long)virt_addr, idx);
 	return NULL;
 }
@@ -676,12 +686,12 @@
 	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
 			list) {
 		if (mapping->ion_fd == ion_fd) {
-			CDBG(" find ion_fd %d\n", ion_fd);
+			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
 			return mapping;
 		}
 	}
 
-	pr_err("Error: Cannot find fd %d by index %d\n",
+	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
 		ion_fd, idx);
 	return NULL;
 }
@@ -693,7 +703,7 @@
 
 	list_for_each_entry_safe(mapping_info, temp,
 			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
-		CDBG("Free mapping address %pK, i = %d, fd = %d\n",
+		CAM_DBG(CAM_SMMU, "Free mapping address %pK, i = %d, fd = %d",
 			(void *)mapping_info->paddr, idx,
 			mapping_info->ion_fd);
 
@@ -708,10 +718,12 @@
 					idx);
 
 		if (ret < 0) {
-			pr_err("Buffer delete failed: idx = %d\n", idx);
-			pr_err("Buffer delete failed: addr = %lx, fd = %d\n",
-					(unsigned long)mapping_info->paddr,
-					mapping_info->ion_fd);
+			CAM_ERR(CAM_SMMU, "Buffer delete failed: idx = %d",
+				idx);
+			CAM_ERR(CAM_SMMU,
+				"Buffer delete failed: addr = %lx, fd = %d",
+				(unsigned long)mapping_info->paddr,
+				mapping_info->ion_fd);
 			/*
 			 * Ignore this error and continue to delete other
 			 * buffers in the list
@@ -730,13 +742,13 @@
 	} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
 		ret = cam_smmu_attach_device(idx);
 		if (ret < 0) {
-			pr_err("Error: ATTACH fail\n");
+			CAM_ERR(CAM_SMMU, "Error: ATTACH fail");
 			return -ENODEV;
 		}
 		iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
 		ret = 0;
 	} else {
-		pr_err("Error: Not detach/attach: %d\n",
+		CAM_ERR(CAM_SMMU, "Error: Not detach/attach: %d",
 			iommu_cb_set.cb_info[idx].state);
 		ret = -EINVAL;
 	}
@@ -768,28 +780,32 @@
 	uint32_t vaddr = 0;
 
 	if (!iova || !size || (smmu_hdl == HANDLE_INIT)) {
-		pr_err("Error: Input args are invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
 		return -EINVAL;
 	}
 
-	CDBG("Allocating iova size = %zu for smmu hdl=%X\n", size, smmu_hdl);
+	CAM_DBG(CAM_SMMU, "Allocating iova size = %zu for smmu hdl=%X",
+		size, smmu_hdl);
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		return -EINVAL;
 	}
 
 	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
 		rc = -EINVAL;
 		goto get_addr_end;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].shared_support) {
-		pr_err("Error: Shared memory not supported for hdl = %X\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: Shared memory not supported for hdl = %X",
 			smmu_hdl);
 		rc = -EINVAL;
 		goto get_addr_end;
@@ -812,19 +828,21 @@
 	int idx;
 
 	if (!size || (smmu_hdl == HANDLE_INIT)) {
-		pr_err("Error: Input args are invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		return -EINVAL;
 	}
 
 	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
 		rc = -EINVAL;
 		goto get_addr_end;
@@ -848,45 +866,47 @@
 	struct iommu_domain *domain;
 
 	if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) {
-		pr_err("Error: Input args are invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].firmware_support) {
-		pr_err("Firmware memory not supported for this SMMU handle\n");
+		CAM_ERR(CAM_SMMU,
+			"Firmware memory not supported for this SMMU handle");
 		rc = -EINVAL;
 		goto end;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].is_fw_allocated) {
-		pr_err("Trying to allocate twice\n");
+		CAM_ERR(CAM_SMMU, "Trying to allocate twice");
 		rc = -ENOMEM;
 		goto unlock_and_end;
 	}
 
 	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
 	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
-	CDBG("Firmware area len from DT = %zu\n", firmware_len);
+	CAM_DBG(CAM_SMMU, "Firmware area len from DT = %zu", firmware_len);
 
 	icp_fw.fw_kva = dma_alloc_coherent(icp_fw.fw_dev,
 		firmware_len,
 		&icp_fw.fw_dma_hdl,
 		GFP_KERNEL);
 	if (!icp_fw.fw_kva) {
-		pr_err("FW memory alloc failed\n");
+		CAM_ERR(CAM_SMMU, "FW memory alloc failed");
 		rc = -ENOMEM;
 		goto unlock_and_end;
 	} else {
-		CDBG("DMA alloc returned fw = %pK, hdl = %pK\n",
+		CAM_DBG(CAM_SMMU, "DMA alloc returned fw = %pK, hdl = %pK",
 			icp_fw.fw_kva, (void *)icp_fw.fw_dma_hdl);
 	}
 
@@ -898,7 +918,7 @@
 		IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
 
 	if (rc) {
-		pr_err("Failed to map FW into IOMMU\n");
+		CAM_ERR(CAM_SMMU, "Failed to map FW into IOMMU");
 		rc = -ENOMEM;
 		goto alloc_fail;
 	}
@@ -933,27 +953,30 @@
 	size_t unmapped = 0;
 
 	if (smmu_hdl == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		rc = -EINVAL;
 		goto end;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].firmware_support) {
-		pr_err("Firmware memory not supported for this SMMU handle\n");
+		CAM_ERR(CAM_SMMU,
+			"Firmware memory not supported for this SMMU handle");
 		rc = -EINVAL;
 		goto end;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (!iommu_cb_set.cb_info[idx].is_fw_allocated) {
-		pr_err("Trying to deallocate firmware that is not allocated\n");
+		CAM_ERR(CAM_SMMU,
+			"Trying to deallocate firmware that is not allocated");
 		rc = -ENOMEM;
 		goto unlock_and_end;
 	}
@@ -966,7 +989,7 @@
 		firmware_len);
 
 	if (unmapped != firmware_len) {
-		pr_err("Only %zu unmapped out of total %zu\n",
+		CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu",
 			unmapped,
 			firmware_len);
 		rc = -EINVAL;
@@ -997,18 +1020,18 @@
 	struct cam_context_bank_info *cb = NULL;
 
 	if (!region_info) {
-		pr_err("Invalid region_info pointer\n");
+		CAM_ERR(CAM_SMMU, "Invalid region_info pointer");
 		return -EINVAL;
 	}
 
 	if (smmu_hdl == HANDLE_INIT) {
-		pr_err("Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Invalid handle");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU, "Handle or index invalid. idx = %d hdl = %x",
 			idx, smmu_hdl);
 		return -EINVAL;
 	}
@@ -1016,7 +1039,7 @@
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	cb = &iommu_cb_set.cb_info[idx];
 	if (!cb) {
-		pr_err("SMMU context bank pointer invalid\n");
+		CAM_ERR(CAM_SMMU, "SMMU context bank pointer invalid");
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return -EINVAL;
 	}
@@ -1024,7 +1047,7 @@
 	switch (region_id) {
 	case CAM_SMMU_REGION_FIRMWARE:
 		if (!cb->firmware_support) {
-			pr_err("Firmware not supported\n");
+			CAM_ERR(CAM_SMMU, "Firmware not supported");
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return -ENODEV;
 		}
@@ -1033,7 +1056,7 @@
 		break;
 	case CAM_SMMU_REGION_SHARED:
 		if (!cb->shared_support) {
-			pr_err("Shared mem not supported\n");
+			CAM_ERR(CAM_SMMU, "Shared mem not supported");
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return -ENODEV;
 		}
@@ -1042,7 +1065,7 @@
 		break;
 	case CAM_SMMU_REGION_SCRATCH:
 		if (!cb->scratch_buf_support) {
-			pr_err("Scratch memory not supported\n");
+			CAM_ERR(CAM_SMMU, "Scratch memory not supported");
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return -ENODEV;
 		}
@@ -1051,7 +1074,7 @@
 		break;
 	case CAM_SMMU_REGION_IO:
 		if (!cb->io_support) {
-			pr_err("IO memory not supported\n");
+			CAM_ERR(CAM_SMMU, "IO memory not supported");
 			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 			return -ENODEV;
 		}
@@ -1059,7 +1082,7 @@
 		region_info->iova_len = cb->io_info.iova_len;
 		break;
 	default:
-		pr_err("Invalid region id: %d for smmu hdl: %X\n",
+		CAM_ERR(CAM_SMMU, "smmu hdl: %X, invalid region id: %d",
 			smmu_hdl, region_id);
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return -EINVAL;
@@ -1088,28 +1111,28 @@
 	buf = dma_buf_get(ion_fd);
 	if (IS_ERR_OR_NULL(buf)) {
 		rc = PTR_ERR(buf);
-		pr_err("Error: dma get buf failed. fd = %d\n", ion_fd);
+		CAM_ERR(CAM_SMMU, "Error: dma get buf failed. fd = %d", ion_fd);
 		goto err_out;
 	}
 
 	attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
 	if (IS_ERR_OR_NULL(attach)) {
 		rc = PTR_ERR(attach);
-		pr_err("Error: dma buf attach failed\n");
+		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
 		goto err_put;
 	}
 
 	table = dma_buf_map_attachment(attach, dma_dir);
 	if (IS_ERR_OR_NULL(table)) {
 		rc = PTR_ERR(table);
-		pr_err("Error: dma buf map attachment failed\n");
+		CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
 		goto err_detach;
 	}
 
 	if (region_id == CAM_SMMU_REGION_SHARED) {
 		domain = iommu_cb_set.cb_info[idx].mapping->domain;
 		if (!domain) {
-			pr_err("CB has no domain set\n");
+			CAM_ERR(CAM_SMMU, "CB has no domain set");
 			goto err_unmap_sg;
 		}
 
@@ -1118,7 +1141,8 @@
 			&iova);
 
 		if (rc < 0) {
-			pr_err("IOVA alloc failed for shared memory\n");
+			CAM_ERR(CAM_SMMU,
+				"IOVA alloc failed for shared memory");
 			goto err_unmap_sg;
 		}
 
@@ -1129,17 +1153,17 @@
 			IOMMU_READ | IOMMU_WRITE);
 
 		if (size < 0) {
-			pr_err("IOMMU mapping failed\n");
+			CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
 			rc = cam_smmu_free_iova(iova,
 				size,
 				iommu_cb_set.cb_info[idx].handle);
 
 			if (rc)
-				pr_err("IOVA free failed\n");
+				CAM_ERR(CAM_SMMU, "IOVA free failed");
 			rc = -ENOMEM;
 			goto err_unmap_sg;
 		} else {
-			CDBG("iommu_map_sg returned %zu\n", size);
+			CAM_DBG(CAM_SMMU, "iommu_map_sg returned %zu", size);
 			*paddr_ptr = iova;
 			*len_ptr = size;
 		}
@@ -1148,7 +1172,7 @@
 			table->sgl, table->nents, dma_dir, buf);
 
 		if (rc != table->nents) {
-			pr_err("Error: msm_dma_map_sg_lazy failed\n");
+			CAM_ERR(CAM_SMMU, "Error: msm_dma_map_sg_lazy failed");
 			rc = -ENOMEM;
 			goto err_unmap_sg;
 		} else {
@@ -1156,22 +1180,23 @@
 			*len_ptr = (size_t)sg_dma_len(table->sgl);
 		}
 	} else {
-		pr_err("Error: Wrong region id passed for %s\n", __func__);
+		CAM_ERR(CAM_SMMU, "Error: Wrong region id passed");
 		rc = -EINVAL;
 		goto err_unmap_sg;
 	}
 
 	if (table->sgl) {
-		CDBG("DMA buf: %pK, device: %pK, attach: %pK, table: %pK\n",
-				(void *)buf,
-				(void *)iommu_cb_set.cb_info[idx].dev,
-				(void *)attach, (void *)table);
-		CDBG("table sgl: %pK, rc: %d, dma_address: 0x%x\n",
-				(void *)table->sgl, rc,
-				(unsigned int)table->sgl->dma_address);
+		CAM_DBG(CAM_SMMU,
+			"DMA buf: %pK, device: %pK, attach: %pK, table: %pK",
+			(void *)buf,
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)attach, (void *)table);
+		CAM_DBG(CAM_SMMU, "table sgl: %pK, rc: %d, dma_address: 0x%x",
+			(void *)table->sgl, rc,
+			(unsigned int)table->sgl->dma_address);
 	} else {
 		rc = -EINVAL;
-		pr_err("Error: table sgl is null\n");
+		CAM_ERR(CAM_SMMU, "Error: table sgl is null");
 		goto err_unmap_sg;
 	}
 
@@ -1192,13 +1217,13 @@
 	mapping_info->region_id = region_id;
 
 	if (!*paddr_ptr || !*len_ptr) {
-		pr_err("Error: Space Allocation failed!\n");
+		CAM_ERR(CAM_SMMU, "Error: Space Allocation failed");
 		kfree(mapping_info);
 		rc = -ENOSPC;
 		goto err_alloc;
 	}
-	CDBG("ion_fd = %d, dev = %pK, paddr= %pK, len = %u\n", ion_fd,
-		(void *)iommu_cb_set.cb_info[idx].dev,
+	CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u",
+		ion_fd, (void *)iommu_cb_set.cb_info[idx].dev,
 		(void *)*paddr_ptr, (unsigned int)*len_ptr);
 
 	/* add to the list */
@@ -1241,17 +1266,19 @@
 
 	if ((!mapping_info->buf) || (!mapping_info->table) ||
 		(!mapping_info->attach)) {
-		pr_err("Error: Invalid params dev = %pK, table = %pK\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params dev = %pK, table = %pK",
 			(void *)iommu_cb_set.cb_info[idx].dev,
 			(void *)mapping_info->table);
-		pr_err("Error:dma_buf = %pK, attach = %pK\n",
+		CAM_ERR(CAM_SMMU, "Error:dma_buf = %pK, attach = %pK",
 			(void *)mapping_info->buf,
 			(void *)mapping_info->attach);
 		return -EINVAL;
 	}
 
 	if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
-		CDBG("Removing SHARED buffer paddr = %pK, len = %zu\n",
+		CAM_DBG(CAM_SMMU,
+			"Removing SHARED buffer paddr = %pK, len = %zu",
 			(void *)mapping_info->paddr, mapping_info->len);
 
 		domain = iommu_cb_set.cb_info[idx].mapping->domain;
@@ -1261,8 +1288,8 @@
 			mapping_info->len);
 
 		if (size != mapping_info->len) {
-			pr_err("IOMMU unmap failed\n");
-			pr_err("Unmapped = %zu, requested = %zu\n",
+			CAM_ERR(CAM_SMMU, "IOMMU unmap failed");
+			CAM_ERR(CAM_SMMU, "Unmapped = %zu, requested = %zu",
 				size,
 				mapping_info->len);
 		}
@@ -1272,7 +1299,7 @@
 			iommu_cb_set.cb_info[idx].handle);
 
 		if (rc)
-			pr_err("IOVA free failed\n");
+			CAM_ERR(CAM_SMMU, "IOVA free failed");
 
 	} else if (mapping_info->region_id == CAM_SMMU_REGION_IO) {
 		msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
@@ -1317,19 +1344,19 @@
 	int ret = 0;
 
 	if (!identifier) {
-		pr_err("Error: iommu hardware name is NULL\n");
+		CAM_ERR(CAM_SMMU, "Error: iommu hardware name is NULL");
 		return -EINVAL;
 	}
 
 	if (!handle_ptr) {
-		pr_err("Error: handle pointer is NULL\n");
+		CAM_ERR(CAM_SMMU, "Error: handle pointer is NULL");
 		return -EINVAL;
 	}
 
 	/* create and put handle in the table */
 	ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
 	if (ret < 0)
-		pr_err("Error: %s get handle fail\n", identifier);
+		CAM_ERR(CAM_SMMU, "Error: %s get handle fail", identifier);
 
 	return ret;
 }
@@ -1340,20 +1367,21 @@
 	int ret = 0, idx;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: Index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU, "Error: Index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return -EINVAL;
@@ -1371,7 +1399,7 @@
 	case CAM_SMMU_VOTE:
 	case CAM_SMMU_DEVOTE:
 	default:
-		pr_err("Error: idx = %d, ops = %d\n", idx, ops);
+		CAM_ERR(CAM_SMMU, "Error: idx = %d, ops = %d", idx, ops);
 		ret = -EINVAL;
 	}
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1396,10 +1424,10 @@
 	struct page *page;
 	struct sg_table *table = NULL;
 
-	CDBG("%s: nents = %lu, idx = %d, virt_len  = %zx\n",
-		__func__, nents, idx, virt_len);
-	CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %pK\n",
-		__func__, phys_len, iommu_dir, virt_addr);
+	CAM_DBG(CAM_SMMU, "nents = %lu, idx = %d, virt_len  = %zx",
+		nents, idx, virt_len);
+	CAM_DBG(CAM_SMMU, "phys_len = %zx, iommu_dir = %d, virt_addr = %pK",
+		phys_len, iommu_dir, virt_addr);
 
 	/*
 	 * This table will go inside the 'mapping' structure
@@ -1435,7 +1463,8 @@
 		virt_len, &iova);
 
 	if (rc < 0) {
-		pr_err("Could not find valid iova for scratch buffer");
+		CAM_ERR(CAM_SMMU,
+			"Could not find valid iova for scratch buffer");
 		goto err_iommu_map;
 	}
 
@@ -1444,7 +1473,7 @@
 		table->sgl,
 		table->nents,
 		iommu_dir) != virt_len) {
-		pr_err("iommu_map_sg() failed");
+		CAM_ERR(CAM_SMMU, "iommu_map_sg() failed");
 		goto err_iommu_map;
 	}
 
@@ -1466,22 +1495,23 @@
 	mapping_info->phys_len = phys_len;
 	mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
 
-	CDBG("%s: paddr = %pK, len = %zx, phys_len = %zx",
-		__func__, (void *)mapping_info->paddr,
+	CAM_DBG(CAM_SMMU, "paddr = %pK, len = %zx, phys_len = %zx",
+		(void *)mapping_info->paddr,
 		mapping_info->len, mapping_info->phys_len);
 
 	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
 
 	*virt_addr = (dma_addr_t)iova;
 
-	CDBG("%s: mapped virtual address = %lx\n", __func__,
+	CAM_DBG(CAM_SMMU, "mapped virtual address = %lx",
 		(unsigned long)*virt_addr);
 	return 0;
 
 err_mapping_info:
 	unmapped = iommu_unmap(domain, iova,  virt_len);
 	if (unmapped != virt_len)
-		pr_err("Unmapped only %zx instead of %zx", unmapped, virt_len);
+		CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
+			unmapped, virt_len);
 err_iommu_map:
 	__free_pages(page, get_order(phys_len));
 err_page_alloc:
@@ -1504,7 +1534,8 @@
 		&iommu_cb_set.cb_info[idx].scratch_map;
 
 	if (!mapping_info->table) {
-		pr_err("Error: Invalid params: dev = %pK, table = %pK",
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params: dev = %pK, table = %pK",
 			(void *)iommu_cb_set.cb_info[idx].dev,
 			(void *)mapping_info->table);
 		return -EINVAL;
@@ -1513,14 +1544,15 @@
 	/* Clean up the mapping_info struct from the list */
 	unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
 	if (unmapped != mapping_info->len)
-		pr_err("Unmapped only %zx instead of %zx",
+		CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
 			unmapped, mapping_info->len);
 
 	rc = cam_smmu_free_scratch_va(scratch_map,
 		mapping_info->paddr,
 		mapping_info->len);
 	if (rc < 0) {
-		pr_err("Error: Invalid iova while freeing scratch buffer\n");
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid iova while freeing scratch buffer");
 		rc = -EINVAL;
 	}
 
@@ -1546,67 +1578,74 @@
 	unsigned int iommu_dir;
 
 	if (!paddr_ptr || !virt_len || !phys_len) {
-		pr_err("Error: Input pointer or lengths invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input pointer or lengths invalid");
 		return -EINVAL;
 	}
 
 	if (virt_len < phys_len) {
-		pr_err("Error: virt_len > phys_len\n");
+		CAM_ERR(CAM_SMMU, "Error: virt_len < phys_len");
 		return -EINVAL;
 	}
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
 	if (iommu_dir == IOMMU_INVALID_DIR) {
-		pr_err("Error: translate direction failed. dir = %d\n", dir);
+		CAM_ERR(CAM_SMMU,
+			"Error: translate direction failed. dir = %d", dir);
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto error;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
-		pr_err("Error: Context bank does not support scratch bufs\n");
+		CAM_ERR(CAM_SMMU,
+			"Error: Context bank does not support scratch bufs");
 		rc = -EINVAL;
 		goto error;
 	}
 
-	CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n",
-		__func__, handle, idx, dir);
-	CDBG("%s: virt_len = %zx, phys_len  = %zx\n",
-		__func__, phys_len, virt_len);
+	CAM_DBG(CAM_SMMU, "smmu handle = %x, idx = %d, dir = %d",
+		handle, idx, dir);
+	CAM_DBG(CAM_SMMU, "virt_len = %zx, phys_len  = %zx",
+		phys_len, virt_len);
 
 	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
-		pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
-				iommu_cb_set.cb_info[idx].name);
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
 		rc = -EINVAL;
 		goto error;
 	}
 
 	if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
-		pr_err("Requested scratch buffer length not page aligned\n");
+		CAM_ERR(CAM_SMMU,
+			"Requested scratch buffer length not page aligned");
 		rc = -EINVAL;
 		goto error;
 	}
 
 	if (!IS_ALIGNED(virt_len, phys_len)) {
-		pr_err("Requested virt length not aligned with phys length\n");
+		CAM_ERR(CAM_SMMU,
+			"Requested virt length not aligned with phys length");
 		rc = -EINVAL;
 		goto error;
 	}
@@ -1617,7 +1656,7 @@
 		iommu_dir,
 		paddr_ptr);
 	if (rc < 0)
-		pr_err("Error: mapping or add list fail\n");
+		CAM_ERR(CAM_SMMU, "Error: mapping or add list fail");
 
 error:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1632,28 +1671,31 @@
 	struct cam_dma_buff_info *mapping_info;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	/* find index in the iommu_cb_set.cb_info */
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto handle_err;
 	}
 
 	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
-		pr_err("Error: Context bank does not support scratch buffers\n");
+		CAM_ERR(CAM_SMMU,
+			"Error: Context bank does not support scratch buffers");
 		rc = -EINVAL;
 		goto handle_err;
 	}
@@ -1663,7 +1705,7 @@
 	 */
 	mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
 	if (!mapping_info) {
-		pr_err("Error: Invalid params\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid params");
 		rc = -ENODEV;
 		goto handle_err;
 	}
@@ -1671,7 +1713,7 @@
 	/* unmapping one buffer from device */
 	rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
 	if (rc < 0) {
-		pr_err("Error: unmap or remove list fail\n");
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
 		goto handle_err;
 	}
 
@@ -1698,12 +1740,12 @@
 	enum cam_smmu_buf_state buf_state;
 
 	if (!paddr_ptr || !len_ptr) {
-		pr_err("Input pointers are invalid\n");
+		CAM_ERR(CAM_SMMU, "Input pointers are invalid");
 		return -EINVAL;
 	}
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Invalid handle");
 		return -EINVAL;
 	}
 
@@ -1714,27 +1756,28 @@
 
 	dma_dir = cam_smmu_translate_dir(dir);
 	if (dma_dir == DMA_NONE) {
-		pr_err("translate direction failed. dir = %d\n", dir);
+		CAM_ERR(CAM_SMMU, "translate direction failed. dir = %d", dir);
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU, "handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto get_addr_end;
 	}
 
 	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
-		pr_err("Err:Dev %s should call SMMU attach before map buffer\n",
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
 				iommu_cb_set.cb_info[idx].name);
 		rc = -EINVAL;
 		goto get_addr_end;
@@ -1743,15 +1786,16 @@
 	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
 		len_ptr);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
-		CDBG("ion_fd:%d already in the list, give same addr back",
-				 ion_fd);
+		CAM_ERR(CAM_SMMU,
+			"ion_fd:%d already in the list, give same addr back",
+			 ion_fd);
 		rc = -EALREADY;
 		goto get_addr_end;
 	}
 	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
 			paddr_ptr, len_ptr, region_id);
 	if (rc < 0)
-		pr_err("mapping or add list fail\n");
+		CAM_ERR(CAM_SMMU, "mapping or add list fail");
 
 get_addr_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1767,12 +1811,12 @@
 	enum cam_smmu_buf_state buf_state;
 
 	if (!paddr_ptr || !len_ptr) {
-		pr_err("Error: Input pointers are invalid\n");
+		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
 		return -EINVAL;
 	}
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
@@ -1782,14 +1826,16 @@
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto get_addr_end;
@@ -1797,7 +1843,7 @@
 
 	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
 	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
-		CDBG("ion_fd:%d not in the mapped list", ion_fd);
+		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
 		rc = -EINVAL;
 		goto get_addr_end;
 	}
@@ -1823,21 +1869,23 @@
 	struct cam_dma_buff_info *mapping_info;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	/* find index in the iommu_cb_set.cb_info */
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto unmap_end;
@@ -1846,17 +1894,17 @@
 	/* Based on ion fd and index, we can find mapping info of buffer */
 	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
 	if (!mapping_info) {
-		pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+		CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
 			idx, ion_fd);
 		rc = -EINVAL;
 		goto unmap_end;
 	}
 
 	/* Unmapping one buffer from device */
-	CDBG("SMMU: removing buffer idx = %d\n", idx);
+	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
 	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
 	if (rc < 0)
-		pr_err("Error: unmap or remove list fail\n");
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
 
 unmap_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -1871,21 +1919,23 @@
 	struct cam_dma_buff_info *mapping_info;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	/* find index in the iommu_cb_set.cb_info */
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		rc = -EINVAL;
 		goto put_addr_end;
@@ -1894,7 +1944,7 @@
 	/* based on ion fd and index, we can find mapping info of buffer */
 	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
 	if (!mapping_info) {
-		pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+		CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
 			idx, ion_fd);
 		rc = -EINVAL;
 		goto put_addr_end;
@@ -1911,27 +1961,29 @@
 	int idx;
 
 	if (handle == HANDLE_INIT) {
-		pr_err("Error: Invalid handle\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
 		return -EINVAL;
 	}
 
 	idx = GET_SMMU_TABLE_IDX(handle);
 	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
-		pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
 			idx, handle);
 		return -EINVAL;
 	}
 
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
-		pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
 			iommu_cb_set.cb_info[idx].handle, handle);
 		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 		return -EINVAL;
 	}
 
 	if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
-		pr_err("Client %s buffer list is not clean!\n",
+		CAM_ERR(CAM_SMMU, "Client %s buffer list is not clean",
 			iommu_cb_set.cb_info[idx].name);
 		cam_smmu_print_list(idx);
 		cam_smmu_clean_buffer_list(idx);
@@ -1981,7 +2033,7 @@
 	int rc = 0;
 
 	if (!cb || !dev) {
-		pr_err("Error: invalid input params\n");
+		CAM_ERR(CAM_SMMU, "Error: invalid input params");
 		return -EINVAL;
 	}
 
@@ -2001,12 +2053,13 @@
 			cb->shared_info.iova_len,
 			-1);
 
-		CDBG("Shared mem start->%lX\n",
+		CAM_DBG(CAM_SMMU, "Shared mem start->%lX",
 			(unsigned long)cb->shared_info.iova_start);
-		CDBG("Shared mem len->%zu\n", cb->shared_info.iova_len);
+		CAM_DBG(CAM_SMMU, "Shared mem len->%zu",
+			cb->shared_info.iova_len);
 
 		if (rc) {
-			pr_err("Genpool chunk creation failed\n");
+			CAM_ERR(CAM_SMMU, "Genpool chunk creation failed");
 			gen_pool_destroy(cb->shared_mem_pool);
 			cb->shared_mem_pool = NULL;
 			return rc;
@@ -2019,7 +2072,8 @@
 			cb->scratch_info.iova_len,
 			0);
 		if (rc < 0) {
-			pr_err("Error: failed to create scratch map\n");
+			CAM_ERR(CAM_SMMU,
+				"Error: failed to create scratch map");
 			rc = -ENODEV;
 			goto end;
 		}
@@ -2030,12 +2084,12 @@
 		cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
 			cb->io_info.iova_start, cb->io_info.iova_len);
 		if (IS_ERR(cb->mapping)) {
-			pr_err("Error: create mapping Failed\n");
+			CAM_ERR(CAM_SMMU, "Error: create mapping Failed");
 			rc = -ENODEV;
 			goto end;
 		}
 	} else {
-		pr_err("Context bank does not have IO region\n");
+		CAM_ERR(CAM_SMMU, "Context bank does not have IO region");
 		rc = -ENODEV;
 		goto end;
 	}
@@ -2060,7 +2114,7 @@
 	struct device_node *domains_child_node = NULL;
 
 	if (!dev) {
-		pr_err("Error: Invalid device\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid device");
 		return -ENODEV;
 	}
 
@@ -2078,7 +2132,7 @@
 	}
 
 	if (iommu_cb_set.cb_num == 0) {
-		pr_err("Error: no context banks present\n");
+		CAM_ERR(CAM_SMMU, "Error: no context banks present");
 		return -ENOENT;
 	}
 
@@ -2088,14 +2142,14 @@
 		GFP_KERNEL);
 
 	if (!iommu_cb_set.cb_info) {
-		pr_err("Error: cannot allocate context banks\n");
+		CAM_ERR(CAM_SMMU, "Error: cannot allocate context banks");
 		return -ENOMEM;
 	}
 
 	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
 	iommu_cb_set.cb_init_count = 0;
 
-	CDBG("no of context banks :%d\n", iommu_cb_set.cb_num);
+	CAM_DBG(CAM_SMMU, "no of context banks :%d", iommu_cb_set.cb_num);
 	return 0;
 }
 
@@ -2109,13 +2163,13 @@
 	int num_regions = 0;
 
 	if (!of_node || !cb) {
-		pr_err("Invalid argument(s)\n");
+		CAM_ERR(CAM_SMMU, "Invalid argument(s)");
 		return -EINVAL;
 	}
 
 	mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
 	if (!mem_map_node) {
-		pr_err("iova-mem-map not present\n");
+		CAM_ERR(CAM_SMMU, "iova-mem-map not present");
 		return -EINVAL;
 	}
 
@@ -2129,7 +2183,7 @@
 			"iova-region-name", &region_name);
 		if (rc < 0) {
 			of_node_put(mem_map_node);
-			pr_err("IOVA region not found\n");
+			CAM_ERR(CAM_SMMU, "IOVA region not found");
 			return -EINVAL;
 		}
 
@@ -2137,7 +2191,7 @@
 			"iova-region-start", &region_start);
 		if (rc < 0) {
 			of_node_put(mem_map_node);
-			pr_err("Failed to read iova-region-start\n");
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-start");
 			return -EINVAL;
 		}
 
@@ -2145,7 +2199,7 @@
 			"iova-region-len", &region_len);
 		if (rc < 0) {
 			of_node_put(mem_map_node);
-			pr_err("Failed to read iova-region-len\n");
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-len");
 			return -EINVAL;
 		}
 
@@ -2153,7 +2207,7 @@
 			"iova-region-id", &region_id);
 		if (rc < 0) {
 			of_node_put(mem_map_node);
-			pr_err("Failed to read iova-region-id\n");
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-id");
 			return -EINVAL;
 		}
 
@@ -2179,20 +2233,22 @@
 			cb->io_info.iova_len = region_len;
 			break;
 		default:
-			pr_err("Incorrect region id present in DT file: %d\n",
+			CAM_ERR(CAM_SMMU,
+				"Incorrect region id present in DT file: %d",
 				region_id);
 		}
 
-		CDBG("Found label -> %s\n", cb->name);
-		CDBG("Found region -> %s\n", region_name);
-		CDBG("region_start -> %X\n", region_start);
-		CDBG("region_len -> %X\n", region_len);
-		CDBG("region_id -> %X\n", region_id);
+		CAM_DBG(CAM_SMMU, "Found label -> %s", cb->name);
+		CAM_DBG(CAM_SMMU, "Found region -> %s", region_name);
+		CAM_DBG(CAM_SMMU, "region_start -> %X", region_start);
+		CAM_DBG(CAM_SMMU, "region_len -> %X", region_len);
+		CAM_DBG(CAM_SMMU, "region_id -> %X", region_id);
 	}
 	of_node_put(mem_map_node);
 
 	if (!num_regions) {
-		pr_err("No memory regions found, at least one needed\n");
+		CAM_ERR(CAM_SMMU,
+			"No memory regions found, at least one needed");
 		rc = -ENODEV;
 	}
 
@@ -2207,13 +2263,13 @@
 	struct device *ctx = NULL;
 
 	if (!dev) {
-		pr_err("Error: Invalid device\n");
+		CAM_ERR(CAM_SMMU, "Error: Invalid device");
 		return -ENODEV;
 	}
 
 	/* check the bounds */
 	if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
-		pr_err("Error: populate more than allocated cb\n");
+		CAM_ERR(CAM_SMMU, "Error: populate more than allocated cb");
 		rc = -EBADHANDLE;
 		goto cb_init_fail;
 	}
@@ -2224,29 +2280,31 @@
 	/* set the name of the context bank */
 	rc = of_property_read_string(dev->of_node, "label", &cb->name);
 	if (rc < 0) {
-		pr_err("Error: failed to read label from sub device\n");
+		CAM_ERR(CAM_SMMU,
+			"Error: failed to read label from sub device");
 		goto cb_init_fail;
 	}
 
 	rc = cam_smmu_get_memory_regions_info(dev->of_node,
 		cb);
 	if (rc < 0) {
-		pr_err("Error: Getting region info\n");
+		CAM_ERR(CAM_SMMU, "Error: Getting region info");
 		return rc;
 	}
 
 	/* set up the iommu mapping for the  context bank */
 	if (type == CAM_QSMMU) {
-		pr_err("Error: QSMMU ctx not supported for : %s\n", cb->name);
+		CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for : %s",
+			cb->name);
 		return -ENODEV;
 	}
 
 	ctx = dev;
-	CDBG("getting Arm SMMU ctx : %s\n", cb->name);
+	CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name);
 
 	rc = cam_smmu_setup_cb(cb, ctx);
 	if (rc < 0) {
-		pr_err("Error: failed to setup cb : %s\n", cb->name);
+		CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", cb->name);
 		goto cb_init_fail;
 	}
 
@@ -2258,7 +2316,7 @@
 	/* increment count to next bank */
 	iommu_cb_set.cb_init_count++;
 
-	CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count);
+	CAM_DBG(CAM_SMMU, "X: cb init count :%d", iommu_cb_set.cb_init_count);
 
 cb_init_fail:
 	return rc;
@@ -2272,14 +2330,14 @@
 	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
 		rc = cam_alloc_smmu_context_banks(dev);
 		if (rc < 0) {
-			pr_err("Error: allocating context banks\n");
+			CAM_ERR(CAM_SMMU, "Error: allocating context banks");
 			return -ENOMEM;
 		}
 	}
 	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
 		rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
 		if (rc < 0) {
-			pr_err("Error: populating context banks\n");
+			CAM_ERR(CAM_SMMU, "Error: populating context banks");
 			return -ENOMEM;
 		}
 		return rc;
@@ -2287,7 +2345,7 @@
 	if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
 		rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
 		if (rc < 0) {
-			pr_err("Error: populating context banks\n");
+			CAM_ERR(CAM_SMMU, "Error: populating context banks");
 			return -ENOMEM;
 		}
 		return rc;
@@ -2304,7 +2362,7 @@
 	rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
 				NULL, &pdev->dev);
 	if (rc < 0) {
-		pr_err("Error: populating devices\n");
+		CAM_ERR(CAM_SMMU, "Error: populating devices");
 	} else {
 		INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
 		mutex_init(&iommu_cb_set.payload_list_lock);
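
The cam_smmu changes above follow the same mechanical pattern as the rest of this series: the per-file pr_fmt prefix and local CDBG macro are dropped, and all logging goes through the common CAM_ERR/CAM_DBG macros, which prepend the module tag, function and line and append the newline themselves. A minimal sketch of the before/after shape (the wrapper function below is illustrative only, not code from this patch):

#include "cam_debug_util.h"

static int cam_smmu_example_validate_idx(int idx, int cb_num)
{
	/* previously: pr_err("Error: Index invalid. idx = %d\n", idx); */
	if (idx < 0 || idx >= cb_num) {
		CAM_ERR(CAM_SMMU, "Error: Index invalid. idx = %d", idx);
		return -EINVAL;
	}

	/* previously: CDBG("idx = %d\n", idx); */
	CAM_DBG(CAM_SMMU, "valid idx = %d", idx);
	return 0;
}
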
diff --git a/drivers/media/platform/msm/camera/cam_sync/Makefile b/drivers/media/platform/msm/camera/cam_sync/Makefile
index e3012cb..8e884ca 100644
--- a/drivers/media/platform/msm/camera/cam_sync/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sync/Makefile
@@ -1 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 96f40e1..644cb63 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-SYNC %s:%d " fmt, __func__, __LINE__
-
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/irqflags.h>
@@ -19,6 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/debugfs.h>
 #include "cam_sync_util.h"
+#include "cam_debug_util.h"
 
 struct sync_device *sync_dev;
 
@@ -35,7 +34,8 @@
 
 	rc = cam_sync_init_object(sync_dev->sync_table, idx, name);
 	if (rc) {
-		pr_err("Error: Unable to init row at idx = %ld\n", idx);
+		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+			idx);
 		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
 		return -EINVAL;
 	}
@@ -62,7 +62,8 @@
 	row = sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj %d",
 			sync_obj);
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		return -EINVAL;
@@ -124,7 +125,8 @@
 	row = sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		return -EINVAL;
@@ -157,12 +159,14 @@
 	/* Objects to be signaled will be added into this list */
 	INIT_LIST_HEAD(&sync_list);
 
-	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
+		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj");
 		return -EINVAL;
-
+	}
 	row = sync_dev->sync_table + sync_obj;
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		return -EINVAL;
 	}
@@ -170,14 +174,15 @@
 	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
 	if (row->type == CAM_SYNC_TYPE_GROUP) {
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		pr_err("Error: Signaling a GROUP sync object = %d\n",
+		CAM_ERR(CAM_SYNC, "Error: Signaling a GROUP sync object = %d",
 			sync_obj);
 		return -EINVAL;
 	}
 
 	if (row->state != CAM_SYNC_STATE_ACTIVE) {
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		pr_err("Error: Sync object already signaled sync_obj = %d",
+		CAM_ERR(CAM_SYNC,
+			"Error: Sync object already signaled sync_obj = %d",
 			sync_obj);
 		return -EALREADY;
 	}
@@ -185,7 +190,8 @@
 	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
 		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-		pr_err("Error: signaling with undefined status = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: signaling with undefined status = %d",
 			status);
 		return -EINVAL;
 	}
@@ -297,18 +303,20 @@
 	rc = cam_sync_util_validate_merge(sync_obj,
 		num_objs);
 	if (rc < 0) {
-		pr_err("Validation failed, Merge not allowed");
+		CAM_ERR(CAM_SYNC, "Validation failed, Merge not allowed");
 		return -EINVAL;
 	}
 
 	rc = cam_sync_util_find_and_set_empty_row(sync_dev, &idx);
 	if (rc < 0) {
-		pr_err("Error: Unable to find empty row, table full");
+		CAM_ERR(CAM_SYNC,
+			"Error: Unable to find empty row, table full");
 		return -EINVAL;
 	}
 
 	if (idx <= 0 || idx >= CAM_SYNC_MAX_OBJS) {
-		pr_err("Error: Invalid empty row index returned = %ld", idx);
+		CAM_ERR(CAM_SYNC,
+			"Error: Invalid empty row index returned = %ld", idx);
 		return -EINVAL;
 	}
 
@@ -317,7 +325,8 @@
 		num_objs);
 
 	if (rc < 0) {
-		pr_err("Error: Unable to init row at idx = %ld\n", idx);
+		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+			idx);
 		return -EINVAL;
 	}
 
@@ -335,7 +344,8 @@
 
 	row = sync_dev->sync_table + sync_obj;
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj: idx = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj: idx = %d",
 			sync_obj);
 		return -EINVAL;
 	}
@@ -356,7 +366,8 @@
 	row = sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		return -EINVAL;
 	}
@@ -365,15 +376,16 @@
 		msecs_to_jiffies(timeout_ms));
 
 	if (!timeleft) {
-		pr_err("Error: cam_sync_wait() timed out for sync obj = %d\n",
-			sync_obj);
+		CAM_ERR(CAM_SYNC,
+			"Error: timed out for sync obj = %d", sync_obj);
 		rc = -ETIMEDOUT;
 	} else {
 		switch (row->state) {
 		case CAM_SYNC_STATE_INVALID:
 		case CAM_SYNC_STATE_ACTIVE:
 		case CAM_SYNC_STATE_SIGNALED_ERROR:
-			pr_err("Error: Wait on invalid state = %d, obj = %d\n",
+			CAM_ERR(CAM_SYNC,
+				"Error: Wait on invalid state = %d, obj = %d",
 				row->state, sync_obj);
 			rc = -EINVAL;
 			break;
@@ -566,7 +578,8 @@
 	row =  sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		kfree(user_payload_kernel);
@@ -616,12 +629,12 @@
 	struct sync_table_row *row = NULL;
 
 	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
-		CDBG("Incorrect ioctl size\n");
+		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
 		return -EINVAL;
 	}
 
 	if (!k_ioctl->ioctl_ptr) {
-		CDBG("Invalid embedded ioctl ptr\n");
+		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
 		return -EINVAL;
 	}
 
@@ -638,7 +651,8 @@
 	row =  sync_dev->sync_table + sync_obj;
 
 	if (row->state == CAM_SYNC_STATE_INVALID) {
-		pr_err("Error: accessing an uninitialized sync obj = %d\n",
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
 			sync_obj);
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
 		return -EINVAL;
@@ -667,7 +681,7 @@
 	struct cam_private_ioctl_arg k_ioctl;
 
 	if (!sync_dev) {
-		pr_err("%s sync_dev NULL\n", __func__);
+		CAM_ERR(CAM_SYNC, "sync_dev NULL");
 		return -EINVAL;
 	}
 
@@ -735,7 +749,7 @@
 	struct sync_device *sync_dev = video_drvdata(filep);
 
 	if (!sync_dev) {
-		pr_err("%s Sync device NULL\n", __func__);
+		CAM_ERR(CAM_SYNC, "Sync device NULL");
 		return -ENODEV;
 	}
 
@@ -752,7 +766,7 @@
 		sync_dev->cam_sync_eventq = filep->private_data;
 		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
 	} else {
-		pr_err("v4l2_fh_open failed : %d\n", rc);
+		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
 	}
 	mutex_unlock(&sync_dev->table_lock);
 
@@ -766,7 +780,7 @@
 	struct sync_device *sync_dev = video_drvdata(filep);
 
 	if (!sync_dev) {
-		pr_err("%s Sync device NULL\n", __func__);
+		CAM_ERR(CAM_SYNC, "Sync device NULL");
 		rc = -ENODEV;
 		return rc;
 	}
@@ -784,11 +798,13 @@
 			 */
 			rc = cam_sync_signal(i, CAM_SYNC_STATE_SIGNALED_ERROR);
 			if (rc < 0)
-				pr_err("Cleanup signal failed: idx = %d\n", i);
+				CAM_ERR(CAM_SYNC,
+					"Cleanup signal failed: idx = %d", i);
 
 			rc = cam_sync_destroy(i);
 			if (rc < 0)
-				pr_err("Cleanup destroy failed: idx = %d\n", i);
+				CAM_ERR(CAM_SYNC,
+					"Cleanup destroy failed: idx = %d", i);
 		}
 	}
 	mutex_unlock(&sync_dev->table_lock);
@@ -951,7 +967,8 @@
 		WQ_HIGHPRI | WQ_UNBOUND, 0);
 
 	if (!sync_dev->work_queue) {
-		pr_err("Error: high priority work queue creation failed!\n");
+		CAM_ERR(CAM_SYNC,
+			"Error: high priority work queue creation failed");
 		rc = -ENOMEM;
 		goto v4l2_fail;
 	}
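
The release path above force-signals every sync object the closing client left active and then destroys it; the same two calls are the normal teardown sequence for a single object. A short sketch (the wrapper function and sync_obj value are illustrative):

#include "cam_sync_api.h"
#include "cam_debug_util.h"

static void cam_example_sync_teardown(int32_t sync_obj)
{
	int rc;

	/* wake any waiters with an error status first */
	rc = cam_sync_signal(sync_obj, CAM_SYNC_STATE_SIGNALED_ERROR);
	if (rc < 0)
		CAM_ERR(CAM_SYNC, "Cleanup signal failed: idx = %d", sync_obj);

	/* then release the table row */
	rc = cam_sync_destroy(sync_obj);
	if (rc < 0)
		CAM_ERR(CAM_SYNC, "Cleanup destroy failed: idx = %d", sync_obj);
}
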
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index 3b3cbff..c62aacf 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -10,8 +10,6 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "CAM-SYNC-UTIL %s:%d " fmt, __func__, __LINE__
-
 #include "cam_sync_util.h"
 
 int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
@@ -85,7 +83,8 @@
 			active_count++;
 			break;
 		default:
-			pr_err("Invalid state of child object during merge\n");
+			CAM_ERR(CAM_SYNC,
+				"Invalid state of child object during merge");
 			return CAM_SYNC_STATE_SIGNALED_ERROR;
 		}
 	}
@@ -256,7 +255,7 @@
 	struct sync_table_row *row = NULL;
 
 	if (num_objs <= 1) {
-		pr_err("Single object merge is not allowed\n");
+		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
 		return -EINVAL;
 	}
 
@@ -265,7 +264,8 @@
 		spin_lock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
 		if (row->type == CAM_SYNC_TYPE_GROUP ||
 			row->state == CAM_SYNC_STATE_INVALID) {
-			pr_err("Group obj %d can't be merged or obj UNINIT\n",
+			CAM_ERR(CAM_SYNC,
+				"Group obj %d can't be merged or obj UNINIT",
 				sync_obj[i]);
 			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj[i]]);
 			return -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
index 9dedd14..8b60ce1 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -16,6 +16,7 @@
 
 #include <cam_sync_api.h>
 #include "cam_sync_private.h"
+#include "cam_debug_util.h"
 
 extern struct sync_device *sync_dev;
 
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
index 370651f..4702963 100644
--- a/drivers/media/platform/msm/camera/cam_utils/Makefile
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -2,4 +2,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o cam_debug_util.o cam_trace.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o cam_debug_util.o cam_trace.o cam_common_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c
new file mode 100644
index 0000000..199d3ea
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+
+int cam_common_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index)
+{
+	int i;
+
+	for (i = 0; i < num_strings; i++) {
+		if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
+			CAM_DBG(CAM_UTIL, "matched %s : %d",
+				matching_string, i);
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
new file mode 100644
index 0000000..d6a11b7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_COMMON_UTIL_H_
+#define _CAM_COMMON_UTIL_H_
+
+#define CAM_BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+
+/**
+ * cam_common_util_get_string_index()
+ *
+ * @brief                  Match a string against a list of strings and
+ *                         return the matching index
+ *
+ * @strings:               Pointer to list of strings
+ * @num_strings:           Number of strings in 'strings'
+ * @matching_string:       String to match
+ * @index:                 Pointer used to return the matching index
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_common_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index);
+
+#endif /* _CAM_COMMON_UTIL_H_ */
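
A brief usage sketch for the new helper (the clock-name table and the name being looked up are made-up examples; the helper fills *index and returns 0 on a match, -EINVAL otherwise):

#include <linux/kernel.h>
#include <linux/types.h>

#include "cam_common_util.h"
#include "cam_debug_util.h"

static int cam_example_find_clk(void)
{
	static const char *clk_names[] = { "cam_ahb_clk", "cam_axi_clk" };
	char wanted[] = "cam_axi_clk";
	uint32_t idx = 0;
	int rc;

	rc = cam_common_util_get_string_index(clk_names,
		ARRAY_SIZE(clk_names), wanted, &idx);
	if (rc)
		return rc;

	CAM_DBG(CAM_UTIL, "%s found at index %u", wanted, idx);
	return 0;
}
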
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
index eb5e7d8..21f90ca 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
@@ -11,9 +11,14 @@
  */
 
 #include <linux/io.h>
+#include <linux/module.h>
+
 #include "cam_debug_util.h"
 
-static const char *cam_debug_module_id_to_name(unsigned int module_id)
+static uint debug_mdl;
+module_param(debug_mdl, uint, 0644);
+
+const char *cam_get_module_name(unsigned int module_id)
 {
 	const char *name = NULL;
 
@@ -57,6 +62,27 @@
 	case CAM_FLASH:
 		name = "CAM-FLASH";
 		break;
+	case CAM_ACTUATOR:
+		name = "CAM-ACTUATOR";
+		break;
+	case CAM_CCI:
+		name = "CAM-CCI";
+		break;
+	case CAM_CSIPHY:
+		name = "CAM-CSIPHY";
+		break;
+	case CAM_EEPROM:
+		name = "CAM-EEPROM";
+		break;
+	case CAM_UTIL:
+		name = "CAM-UTIL";
+		break;
+	case CAM_CTXT:
+		name = "CAM-CTXT";
+		break;
+	case CAM_HFI:
+		name = "CAM-HFI";
+		break;
 	default:
 		name = "CAM";
 		break;
@@ -69,31 +95,37 @@
 	const char *func, const int line, const char *fmt, ...)
 {
 	char str_buffer[STR_BUFFER_MAX_LENGTH];
-	const char *module_name;
 	va_list args;
 
 	va_start(args, fmt);
-	vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
-	va_end(args);
-
-	module_name = cam_debug_module_id_to_name(module_id);
 
 	switch (dbg_level) {
-	case CAM_LEVEL_INFO:
-		pr_info("CAM_INFO: %s: %s: %d: %s\n",
-			module_name, func, line, str_buffer);
-		break;
-	case CAM_LEVEL_WARN:
-		pr_warn("CAM_WARN: %s: %s: %d: %s\n",
-			module_name, func, line, str_buffer);
+	case CAM_LEVEL_DBG:
+		if (debug_mdl & module_id) {
+			vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+			pr_info("CAM_DBG: %s: %s: %d: %s\n",
+				cam_get_module_name(module_id),
+				func, line, str_buffer);
+		}
+		va_end(args);
 		break;
 	case CAM_LEVEL_ERR:
+		vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
 		pr_err("CAM_ERR: %s: %s: %d: %s\n",
-			module_name, func, line, str_buffer);
+			cam_get_module_name(module_id), func, line, str_buffer);
+		va_end(args);
 		break;
-	case CAM_LEVEL_DBG:
-		pr_info("CAM_DBG: %s: %s: %d: %s\n",
-			module_name, func, line, str_buffer);
+	case CAM_LEVEL_INFO:
+		vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+		pr_info("CAM_INFO: %s: %s: %d: %s\n",
+			cam_get_module_name(module_id), func, line, str_buffer);
+		va_end(args);
+		break;
+	case CAM_LEVEL_WARN:
+		vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+		pr_warn("CAM_WARN: %s: %s: %d: %s\n",
+			cam_get_module_name(module_id), func, line, str_buffer);
+		va_end(args);
 		break;
 	default:
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
index 34adb0e..7275d56 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
@@ -13,22 +13,28 @@
 #ifndef _CAM_DEBUG_UTIL_H_
 #define _CAM_DEBUG_UTIL_H_
 
-#define CAM_CDM    (1 << 0)
-#define CAM_CORE   (1 << 1)
-#define CAM_CPAS   (1 << 2)
-#define CAM_ISP    (1 << 3)
-#define CAM_CRM    (1 << 4)
-#define CAM_SENSOR (1 << 5)
-#define CAM_SMMU   (1 << 6)
-#define CAM_SYNC   (1 << 7)
-#define CAM_ICP    (1 << 8)
-#define CAM_JPEG   (1 << 9)
-#define CAM_FD     (1 << 10)
-#define CAM_LRME   (1 << 11)
-#define CAM_FLASH  (1 << 12)
+#define CAM_CDM        (1 << 0)
+#define CAM_CORE       (1 << 1)
+#define CAM_CPAS       (1 << 2)
+#define CAM_ISP        (1 << 3)
+#define CAM_CRM        (1 << 4)
+#define CAM_SENSOR     (1 << 5)
+#define CAM_SMMU       (1 << 6)
+#define CAM_SYNC       (1 << 7)
+#define CAM_ICP        (1 << 8)
+#define CAM_JPEG       (1 << 9)
+#define CAM_FD         (1 << 10)
+#define CAM_LRME       (1 << 11)
+#define CAM_FLASH      (1 << 12)
+#define CAM_ACTUATOR   (1 << 13)
+#define CAM_CCI        (1 << 14)
+#define CAM_CSIPHY     (1 << 15)
+#define CAM_EEPROM     (1 << 16)
+#define CAM_UTIL       (1 << 17)
+#define CAM_HFI        (1 << 18)
+#define CAM_CTXT       (1 << 19)
 
 #define STR_BUFFER_MAX_LENGTH  1024
-#define GROUP                  0x0000
 
 enum cam_debug_level {
 	CAM_LEVEL_INFO,
@@ -40,61 +46,61 @@
 /*
  *  cam_debug_log()
  *
- * @brief:        Get the Module name form module ID and print
- *                respective debug logs
+ * @brief     :  Get the Module name from module ID and print
+ *               respective debug logs
  *
  * @module_id :  Respective Module ID which is calling this function
  * @dbg_level :  Debug level from cam_module_debug_level enum entries
- * @func :       Function which is calling to print logs
- * @line :       Line number associated with the function which is calling
+ * @func      :  Function which is calling to print logs
+ * @line      :  Line number associated with the function which is calling
  *               to print log
- * @fmt  :       Formatted string which needs to be print in the log
+ * @fmt       :  Formatted string which needs to be print in the log
  *
  */
 void cam_debug_log(unsigned int module_id, enum cam_debug_level dbg_level,
 	const char *func, const int line, const char *fmt, ...);
 
 /*
+ * cam_get_module_name()
+ *
+ * @brief     :  Get the module name from module ID
+ *
+ * @module_id :  Module ID which is using this function
+ */
+const char *cam_get_module_name(unsigned int module_id);
+
+/*
  * CAM_ERR
- * @brief :     This Macro will print error logs
+ * @brief    :  This Macro will print error logs
  *
  * @__module :  Respective module id which is been calling this Macro
- * @fmt :       Formatted string which needs to be print in log
- * @args:       Arguments which needs to be print in log
+ * @fmt      :  Formatted string which needs to be print in log
+ * @args     :  Arguments which needs to be print in log
  */
 #define CAM_ERR(__module, fmt, args...)                            \
-	{                                                          \
-		cam_debug_log(__module, CAM_LEVEL_ERR,             \
-			__func__, __LINE__, fmt, ##args);          \
-	}
+	cam_debug_log(__module, CAM_LEVEL_ERR, __func__, __LINE__, fmt, ##args)
 
 /*
  * CAM_WARN
- * @brief :     This Macro will print warning logs
+ * @brief    :  This Macro will print warning logs
  *
  * @__module :  Respective module id which is been calling this Macro
  * @fmt      :  Formatted string which needs to be print in log
  * @args     :  Arguments which needs to be print in log
  */
 #define CAM_WARN(__module, fmt, args...)                           \
-	{                                                          \
-		cam_debug_log(__module, CAM_LEVEL_WARN,            \
-			__func__, __LINE__, fmt, ##args);          \
-	}
+	cam_debug_log(__module, CAM_LEVEL_WARN, __func__, __LINE__, fmt, ##args)
 
 /*
  * CAM_INFO
- * @brief :     This Macro will print Information logs
+ * @brief    :  This Macro will print Information logs
  *
  * @__module :  Respective module id which is been calling this Macro
  * @fmt      :  Formatted string which needs to be print in log
  * @args     :  Arguments which needs to be print in log
  */
 #define CAM_INFO(__module, fmt, args...)                           \
-	{                                                          \
-		cam_debug_log(__module, CAM_LEVEL_INFO,            \
-			__func__, __LINE__, fmt, ##args);          \
-	}
+	cam_debug_log(__module, CAM_LEVEL_INFO, __func__, __LINE__, fmt, ##args)
 
 /*
  * CAM_DBG
@@ -105,11 +111,14 @@
  * @args     :  Arguments which needs to be print in log
  */
 #define CAM_DBG(__module, fmt, args...)                            \
-	do {                                                       \
-		if (GROUP & __module) {                            \
-			cam_debug_log(__module, CAM_LEVEL_DBG,     \
-				__func__, __LINE__, fmt, ##args);  \
-		}                                                  \
-	} while (0)
+	cam_debug_log(__module, CAM_LEVEL_DBG, __func__, __LINE__, fmt, ##args)
+
+/*
+ * CAM_ERR_RATE_LIMIT
+ * @brief :     This Macro will print error logs with rate limiting
+ */
+#define CAM_ERR_RATE_LIMIT(__module, fmt, args...)                 \
+	pr_err_ratelimited("CAM_ERR: %s: %s: %d " fmt "\n",       \
+		cam_get_module_name(__module), __func__, __LINE__, ##args)
 
 #endif /* _CAM_DEBUG_UTIL_H_ */
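
With the compile-time GROUP mask removed, CAM_DBG is no longer compiled out; it is gated at run time by the new debug_mdl module parameter in cam_debug_util.c, a bitmask built from the CAM_* module defines above (for example CAM_ISP | CAM_SMMU = 0x48). A short sketch of the macros in use (the function, handle and status values are illustrative; the macros add the module tag, function, line and trailing newline themselves):

#include <linux/types.h>

#include "cam_debug_util.h"

static void cam_example_report(int dev_hdl, uint32_t irq_status)
{
	/* printed only when the CAM_ISP bit is set in debug_mdl */
	CAM_DBG(CAM_ISP, "start dev hdl %d", dev_hdl);

	if (irq_status)
		CAM_ERR_RATE_LIMIT(CAM_ISP, "irq status 0x%x", irq_status);
}
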
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
index 78cd9d8..ec08c3c 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -10,22 +10,18 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/err.h>
 #include "cam_io_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 int cam_io_w(uint32_t data, void __iomem *addr)
 {
 	if (!addr)
 		return -EINVAL;
 
-	CDBG("0x%pK %08x\n", addr, data);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 	writel_relaxed(data, addr);
 
 	return 0;
@@ -36,7 +32,7 @@
 	if (!addr)
 		return -EINVAL;
 
-	CDBG("0x%pK %08x\n", addr, data);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 	/* Ensure previous writes are done */
 	wmb();
 	writel_relaxed(data, addr);
@@ -49,12 +45,12 @@
 	uint32_t data;
 
 	if (!addr) {
-		pr_err("Invalid args\n");
+		CAM_ERR(CAM_UTIL, "Invalid args");
 		return 0;
 	}
 
 	data = readl_relaxed(addr);
-	CDBG("0x%pK %08x\n", addr, data);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 
 	return data;
 }
@@ -64,14 +60,14 @@
 	uint32_t data;
 
 	if (!addr) {
-		pr_err("Invalid args\n");
+		CAM_ERR(CAM_UTIL, "Invalid args");
 		return 0;
 	}
 
 	/* Ensure previous read is done */
 	rmb();
 	data = readl_relaxed(addr);
-	CDBG("0x%pK %08x\n", addr, data);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
 
 	return data;
 }
@@ -86,10 +82,10 @@
 	if (!dest_addr || !src_addr)
 		return -EINVAL;
 
-	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+	CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
 
 	for (i = 0; i < len/4; i++) {
-		CDBG("0x%pK %08x\n", d, *s);
+		CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
 		writel_relaxed(*s++, d++);
 	}
 
@@ -106,7 +102,7 @@
 	if (!dest_addr || !src_addr)
 		return -EINVAL;
 
-	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+	CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
 
 	/*
 	 * Do not use cam_io_w_mb to avoid double wmb() after a write
@@ -114,7 +110,7 @@
 	 */
 	wmb();
 	for (i = 0; i < (len / 4); i++) {
-		CDBG("0x%pK %08x\n", d, *s);
+		CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
 		writel_relaxed(*s++, d++);
 	}
 
@@ -138,7 +134,7 @@
 	}
 
 	if (cnt > retry) {
-		pr_debug("Poll failed by value\n");
+		CAM_DBG(CAM_UTIL, "Poll failed by value");
 		rc = -EINVAL;
 	}
 
@@ -163,7 +159,7 @@
 	}
 
 	if (cnt > retry) {
-		pr_debug("Poll failed with mask\n");
+		CAM_DBG(CAM_UTIL, "Poll failed with mask");
 		rc = -EINVAL;
 	}
 
@@ -179,7 +175,7 @@
 		return -EINVAL;
 
 	for (i = 0; i < len; i++) {
-		CDBG("i= %d len =%d val=%x addr =%pK\n",
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
 			i, len, data[i], addr);
 		writel_relaxed(data[i], addr);
 	}
@@ -196,7 +192,7 @@
 		return -EINVAL;
 
 	for (i = 0; i < len; i++) {
-		CDBG("i= %d len =%d val=%x addr =%pK\n",
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
 			i, len, data[i], addr);
 		/* Ensure previous writes are done */
 		wmb();
@@ -217,7 +213,7 @@
 		return -EINVAL;
 
 	for (i = 0; i < len; i++) {
-		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
 			i, len, __VAL(i), addr_base, __OFFSET(i));
 		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
 	}
@@ -236,7 +232,7 @@
 	/* Ensure write is done */
 	wmb();
 	for (i = 0; i < len; i++) {
-		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
 			i, len, __VAL(i), addr_base, __OFFSET(i));
 		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
 	}
@@ -254,7 +250,8 @@
 	int           i;
 	uint32_t      data;
 
-	CDBG("addr=%pK offset=0x%x size=%d\n", base_addr, start_offset, size);
+	CAM_DBG(CAM_UTIL, "addr=%pK offset=0x%x size=%d",
+		base_addr, start_offset, size);
 
 	if (!base_addr || (size <= 0))
 		return -EINVAL;
@@ -271,13 +268,13 @@
 		snprintf(p_str, 9, "%08x ", data);
 		p_str += 9;
 		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
-			pr_err("%s\n", line_str);
+			CAM_ERR(CAM_UTIL, "%s", line_str);
 			line_str[0] = '\0';
 			p_str = line_str;
 		}
 	}
 	if (line_str[0] != '\0')
-		pr_err("%s\n", line_str);
+		CAM_ERR(CAM_UTIL, "%s", line_str);
 
 	return 0;
 }
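
The cam_io helpers above are thin wrappers around readl_relaxed/writel_relaxed that now trace through CAM_DBG(CAM_UTIL, ...). A usage sketch (the register offsets, status bit and membase pointer are invented for illustration; the _mb names are assumed to be the barrier variants declared alongside cam_io_w in cam_io_util.h):

#include <linux/types.h>
#include <linux/io.h>

#include "cam_io_util.h"

static void cam_example_hw_trigger(void __iomem *membase)
{
	uint32_t status;

	/* read with a barrier so stale status is not used */
	status = cam_io_r_mb(membase + 0x0c);

	/* order the trigger write after any prior programming */
	if (!(status & 0x1))
		cam_io_w_mb(0x1, membase + 0x08);
}
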
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 6d90c1e..a1cdfe9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -10,13 +10,118 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+#include <linux/types.h>
+#include <linux/slab.h>
 
 #include "cam_mem_mgr.h"
 #include "cam_packet_util.h"
+#include "cam_debug_util.h"
 
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+	size_t *len)
+{
+	int rc = 0;
+	uint64_t kmd_buf_addr = 0;
+
+	rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Unable to get the virtual address %d", rc);
+	} else {
+		if (kmd_buf_addr && *len) {
+			*buf_addr = (uint32_t *)kmd_buf_addr;
+		} else {
+			CAM_ERR(CAM_UTIL, "Invalid addr and length :%zu", *len);
+			rc = -ENOMEM;
+		}
+	}
+	return rc;
+}
+
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc)
+{
+	if ((cmd_desc->length > cmd_desc->size) ||
+		(cmd_desc->mem_handle <= 0)) {
+		CAM_ERR(CAM_UTIL, "invalid cmd arg %d %d %d %d",
+			cmd_desc->offset, cmd_desc->length,
+			cmd_desc->mem_handle, cmd_desc->size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_packet_util_validate_packet(struct cam_packet *packet)
+{
+	if (!packet)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "num cmd buf:%d num of io config:%d kmd buf index:%d",
+		packet->num_cmd_buf, packet->num_io_configs,
+		packet->kmd_cmd_buf_index);
+
+	if ((packet->kmd_cmd_buf_index >= packet->num_cmd_buf) ||
+		(!packet->header.size) ||
+		(packet->cmd_buf_offset > packet->header.size) ||
+		(packet->io_configs_offset > packet->header.size))  {
+		CAM_ERR(CAM_UTIL, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+	struct cam_kmd_buf_info *kmd_buf)
+{
+	int                      rc = 0;
+	size_t                   len = 0;
+	struct cam_cmd_buf_desc *cmd_desc;
+	uint32_t                *cpu_addr;
+
+	if (!packet || !kmd_buf) {
+		CAM_ERR(CAM_UTIL, "Invalid arg %pK %pK", packet, kmd_buf);
+		return -EINVAL;
+	}
+
+	/* Take first command descriptor and add offset to it for kmd */
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
+		&packet->payload + packet->cmd_buf_offset);
+	cmd_desc += packet->kmd_cmd_buf_index;
+
+	rc = cam_packet_util_validate_cmd_desc(cmd_desc);
+	if (rc)
+		return rc;
+
+	rc = cam_packet_util_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
+		&len);
+	if (rc)
+		return rc;
+
+	if (len < cmd_desc->size) {
+		CAM_ERR(CAM_UTIL, "invalid memory len:%zu and cmd desc size:%d",
+			len, cmd_desc->size);
+		return -EINVAL;
+	}
+
+	cpu_addr += (cmd_desc->offset / 4) + (packet->kmd_cmd_buf_offset / 4);
+	CAM_DBG(CAM_UTIL, "total size %d, cmd size: %d, KMD buffer size: %d",
+		cmd_desc->size, cmd_desc->length,
+		cmd_desc->size - cmd_desc->length);
+	CAM_DBG(CAM_UTIL, "hdl 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK",
+		cmd_desc->mem_handle, cmd_desc->offset,
+		packet->kmd_cmd_buf_offset, cpu_addr);
+
+	kmd_buf->cpu_addr   = cpu_addr;
+	kmd_buf->handle     = cmd_desc->mem_handle;
+	kmd_buf->offset     = cmd_desc->offset + packet->kmd_cmd_buf_offset;
+	kmd_buf->size       = cmd_desc->size - cmd_desc->length;
+	kmd_buf->used_bytes = 0;
+
+	return rc;
+}
 
 int cam_packet_util_process_patches(struct cam_packet *packet,
 	int32_t iommu_hdl)
@@ -36,7 +141,7 @@
 	patch_desc = (struct cam_patch_desc *)
 			((uint32_t *) &packet->payload +
 			packet->patch_offset/4);
-	CDBG("packet = %pK patch_desc = %pK size = %lu\n",
+	CAM_DBG(CAM_UTIL, "packet = %pK patch_desc = %pK size = %lu",
 			(void *)packet, (void *)patch_desc,
 			sizeof(struct cam_patch_desc));
 
@@ -44,7 +149,7 @@
 		rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
 			iommu_hdl, &iova_addr, &src_buf_size);
 		if (rc < 0) {
-			pr_err("unable to get src buf address\n");
+			CAM_ERR(CAM_UTIL, "unable to get src buf address");
 			return rc;
 		}
 		src_buf_iova_addr = (uint32_t *)iova_addr;
@@ -53,12 +158,12 @@
 		rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl,
 			&cpu_addr, &dst_buf_len);
 		if (rc < 0) {
-			pr_err("unable to get dst buf address\n");
+			CAM_ERR(CAM_UTIL, "unable to get dst buf address");
 			return rc;
 		}
 		dst_cpu_addr = (uint32_t *)cpu_addr;
 
-		CDBG("i = %d patch info = %x %x %x %x\n", i,
+		CAM_DBG(CAM_UTIL, "i = %d patch info = %x %x %x %x", i,
 			patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
 			patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
 
@@ -68,7 +173,8 @@
 
 		*dst_cpu_addr = temp;
 
-		CDBG("patch is done for dst %pK with src %pK value %llx\n",
+		CAM_DBG(CAM_UTIL,
+			"patch is done for dst %pK with src %pK value %llx",
 			dst_cpu_addr, src_buf_iova_addr,
 			*((uint64_t *)dst_cpu_addr));
 	}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
index 614e868..8b590a7 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -16,6 +16,62 @@
 #include <uapi/media/cam_defs.h>
 
 /**
+ * @brief                  KMD scratch buffer information
+ *
+ * @handle:                Memory handle
+ * @cpu_addr:              CPU address
+ * @offset:                Offset from the start of the buffer
+ * @size:                  Size of the buffer
+ * @used_bytes:            Used memory in bytes
+ *
+ */
+struct cam_kmd_buf_info {
+	int        handle;
+	uint32_t  *cpu_addr;
+	uint32_t   offset;
+	uint32_t   size;
+	uint32_t   used_bytes;
+};
+
+/**
+ * cam_packet_util_validate_packet()
+ *
+ * @brief                  Validate the packet
+ *
+ * @packet:                Packet to be validated
+ *
+ * @return:                0 for success
+ *                         -EINVAL for failure
+ */
+int cam_packet_util_validate_packet(struct cam_packet *packet);
+
+/**
+ * cam_packet_util_validate_cmd_desc()
+ *
+ * @brief                  Validate the command descriptor
+ *
+ * @cmd_desc:              Command descriptor to be validated
+ *
+ * @return:                0 for success
+ *                         -EINVAL for failure
+ */
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc);
+
+/**
+ * cam_packet_util_get_kmd_buffer()
+ *
+ * @brief                  Get the kmd buffer from the packet command descriptor
+ *
+ * @packet:                Packet data
+ * @kmd_buf_info:          KMD buffer information extracted from the packet
+ *
+ * @return:                0 for success
+ *                         -EINVAL for failure
+ */
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+	struct cam_kmd_buf_info *kmd_buf_info);
+
+/**
  * cam_packet_util_process_patches()
  *
  * @brief:              Replace the handle in Packet to Address using the
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index 1d86bb1..1990230 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -10,17 +10,13 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
-
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 #include "cam_soc_util.h"
-
-#undef CDBG
-#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#include "cam_debug_util.h"
 
 int cam_soc_util_get_level_from_string(const char *string,
 	enum cam_vote_level *level)
@@ -43,7 +39,7 @@
 	} else if (!strcmp(string, "turbo")) {
 		*level = CAM_TURBO_VOTE;
 	} else {
-		pr_err("Invalid string %s\n", string);
+		CAM_ERR(CAM_UTIL, "Invalid string %s", string);
 		return -EINVAL;
 	}
 
@@ -68,7 +64,8 @@
 	enum cam_vote_level *apply_level)
 {
 	if (req_level >= CAM_MAX_VOTE) {
-		pr_err("Invalid clock level parameter %d\n", req_level);
+		CAM_ERR(CAM_UTIL, "Invalid clock level parameter %d",
+			req_level);
 		return -EINVAL;
 	}
 
@@ -84,13 +81,15 @@
 			}
 
 		if (i == CAM_MAX_VOTE) {
-			pr_err("No valid clock level found to apply, req=%d\n",
+			CAM_ERR(CAM_UTIL,
+				"No valid clock level found to apply, req=%d",
 				req_level);
 			return -EINVAL;
 		}
 	}
 
-	CDBG("Req level %d, Applying %d\n", req_level, *apply_level);
+	CAM_DBG(CAM_UTIL, "Req level %d, Applying %d",
+		req_level, *apply_level);
 
 	return 0;
 }
@@ -98,12 +97,12 @@
 int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
 {
 	if (!soc_info) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_UTIL, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	if (!soc_info->irq_line) {
-		pr_err("No IRQ line available\n");
+		CAM_ERR(CAM_UTIL, "No IRQ line available");
 		return -ENODEV;
 	}
 
@@ -115,12 +114,12 @@
 int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
 {
 	if (!soc_info) {
-		pr_err("Invalid arguments\n");
+		CAM_ERR(CAM_UTIL, "Invalid arguments");
 		return -EINVAL;
 	}
 
 	if (!soc_info->irq_line) {
-		pr_err("No IRQ line available\n");
+		CAM_ERR(CAM_UTIL, "No IRQ line available");
 		return -ENODEV;
 	}
 
@@ -138,33 +137,34 @@
 	if (!clk || !clk_name)
 		return -EINVAL;
 
-	CDBG("set %s, rate %d\n", clk_name, clk_rate);
+	CAM_DBG(CAM_UTIL, "set %s, rate %d", clk_name, clk_rate);
 	if (clk_rate > 0) {
 		clk_rate_round = clk_round_rate(clk, clk_rate);
-		CDBG("new_rate %ld\n", clk_rate_round);
+		CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round);
 		if (clk_rate_round < 0) {
-			pr_err("round failed for clock %s rc = %ld\n",
+			CAM_ERR(CAM_UTIL, "round failed for clock %s rc = %ld",
 				clk_name, clk_rate_round);
 			return clk_rate_round;
 		}
 		rc = clk_set_rate(clk, clk_rate_round);
 		if (rc) {
-			pr_err("set_rate failed on %s\n", clk_name);
+			CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
 			return rc;
 		}
 	} else if (clk_rate == INIT_RATE) {
 		clk_rate_round = clk_get_rate(clk);
-		CDBG("init new_rate %ld\n", clk_rate_round);
+		CAM_DBG(CAM_UTIL, "init new_rate %ld", clk_rate_round);
 		if (clk_rate_round == 0) {
 			clk_rate_round = clk_round_rate(clk, 0);
 			if (clk_rate_round <= 0) {
-				pr_err("round rate failed on %s\n", clk_name);
+				CAM_ERR(CAM_UTIL, "round rate failed on %s",
+					clk_name);
 				return clk_rate_round;
 			}
 		}
 		rc = clk_set_rate(clk, clk_rate_round);
 		if (rc) {
-			pr_err("set_rate failed on %s\n", clk_name);
+			CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
 			return rc;
 		}
 	}
@@ -186,7 +186,7 @@
 
 	rc = clk_prepare_enable(clk);
 	if (rc) {
-		pr_err("enable failed for %s: rc(%d)\n", clk_name, rc);
+		CAM_ERR(CAM_UTIL, "enable failed for %s: rc(%d)", clk_name, rc);
 		return rc;
 	}
 
@@ -198,7 +198,7 @@
 	if (!clk || !clk_name)
 		return -EINVAL;
 
-	CDBG("disable %s\n", clk_name);
+	CAM_DBG(CAM_UTIL, "disable %s", clk_name);
 	clk_disable_unprepare(clk);
 
 	return 0;
@@ -223,7 +223,8 @@
 
 	if ((soc_info->num_clk == 0) ||
 		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
-		pr_err("Invalid number of clock %d\n", soc_info->num_clk);
+		CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+			soc_info->num_clk);
 		return -EINVAL;
 	}
 
@@ -304,14 +305,14 @@
 
 	count = of_property_count_strings(of_node, "clock-names");
 
-	CDBG("count = %d\n", count);
+	CAM_DBG(CAM_UTIL, "count = %d", count);
 	if (count > CAM_SOC_MAX_CLK) {
-		pr_err("invalid count of clocks, count=%d", count);
+		CAM_ERR(CAM_UTIL, "invalid count of clocks, count=%d", count);
 		rc = -EINVAL;
 		return rc;
 	}
 	if (count <= 0) {
-		CDBG("No clock-names found\n");
+		CAM_DBG(CAM_UTIL, "No clock-names found");
 		count = 0;
 		soc_info->num_clk = count;
 		return 0;
@@ -321,9 +322,11 @@
 	for (i = 0; i < count; i++) {
 		rc = of_property_read_string_index(of_node, "clock-names",
 				i, &(soc_info->clk_name[i]));
-		CDBG("clock-names[%d] = %s\n", i, soc_info->clk_name[i]);
+		CAM_DBG(CAM_UTIL, "clock-names[%d] = %s",
+			i, soc_info->clk_name[i]);
 		if (rc) {
-			pr_err("i= %d count= %d reading clock-names failed\n",
+			CAM_ERR(CAM_UTIL,
+				"i= %d count= %d reading clock-names failed",
 				i, count);
 			return rc;
 		}
@@ -331,12 +334,13 @@
 
 	num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates");
 	if (num_clk_rates <= 0) {
-		pr_err("reading clock-rates count failed\n");
+		CAM_ERR(CAM_UTIL, "reading clock-rates count failed");
 		return -EINVAL;
 	}
 
 	if ((num_clk_rates % soc_info->num_clk) != 0) {
-		pr_err("mismatch clk/rates, No of clocks=%d, No of rates=%d\n",
+		CAM_ERR(CAM_UTIL,
+			"mismatch clk/rates, No of clocks=%d, No of rates=%d",
 			soc_info->num_clk, num_clk_rates);
 		return -EINVAL;
 	}
@@ -346,7 +350,8 @@
 	num_clk_level_strings = of_property_count_strings(of_node,
 		"clock-cntl-level");
 	if (num_clk_level_strings != num_clk_levels) {
-		pr_err("Mismatch No of levels=%d, No of level string=%d\n",
+		CAM_ERR(CAM_UTIL,
+			"Mismatch No of levels=%d, No of level string=%d",
 			num_clk_levels, num_clk_level_strings);
 		return -EINVAL;
 	}
@@ -355,7 +360,8 @@
 		rc = of_property_read_string_index(of_node,
 			"clock-cntl-level", i, &clk_cntl_lvl_string);
 		if (rc) {
-			pr_err("Error reading clock-cntl-level, rc=%d\n", rc);
+			CAM_ERR(CAM_UTIL,
+				"Error reading clock-cntl-level, rc=%d", rc);
 			return rc;
 		}
 
@@ -364,14 +370,16 @@
 		if (rc)
 			return rc;
 
-		CDBG("[%d] : %s %d\n", i, clk_cntl_lvl_string, level);
+		CAM_DBG(CAM_UTIL,
+			"[%d] : %s %d", i, clk_cntl_lvl_string, level);
 		soc_info->clk_level_valid[level] = true;
 		for (j = 0; j < soc_info->num_clk; j++) {
 			rc = of_property_read_u32_index(of_node, "clock-rates",
 				((i * soc_info->num_clk) + j),
 				&soc_info->clk_rate[level][j]);
 			if (rc) {
-				pr_err("Error reading clock-rates, rc=%d\n",
+				CAM_ERR(CAM_UTIL,
+					"Error reading clock-rates, rc=%d",
 					rc);
 				return rc;
 			}
@@ -381,7 +389,8 @@
 				(long)NO_SET_RATE :
 				soc_info->clk_rate[level][j];
 
-			CDBG("soc_info->clk_rate[%d][%d] = %d\n", level, j,
+			CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d",
+				level, j,
 				soc_info->clk_rate[level][j]);
 		}
 	}
@@ -390,7 +399,7 @@
 	rc = of_property_read_string_index(of_node, "src-clock-name", 0,
 		&src_clk_str);
 	if (rc || !src_clk_str) {
-		CDBG("No src_clk_str found\n");
+		CAM_DBG(CAM_UTIL, "No src_clk_str found");
 		rc = 0;
 		/* Bottom loop is dependent on src_clk_str. So return here */
 		return rc;
@@ -399,7 +408,8 @@
 	for (i = 0; i < soc_info->num_clk; i++) {
 		if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) {
 			soc_info->src_clk_idx = i;
-			CDBG("src clock = %s, index = %d\n", src_clk_str, i);
+			CAM_DBG(CAM_UTIL, "src clock = %s, index = %d",
+				src_clk_str, i);
 			break;
 		}
 	}
@@ -415,7 +425,8 @@
 
 	if ((soc_info->num_clk == 0) ||
 		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
-		pr_err("Invalid number of clock %d\n", soc_info->num_clk);
+		CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+			soc_info->num_clk);
 		return -EINVAL;
 	}
 
@@ -448,7 +459,7 @@
 
 	count /= sizeof(uint32_t);
 	if (!count) {
-		pr_err("gpio-req-tbl-num 0\n");
+		CAM_ERR(CAM_UTIL, "gpio-req-tbl-num 0");
 		return 0;
 	}
 
@@ -467,30 +478,32 @@
 	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num",
 		val_array, count);
 	if (rc) {
-		pr_err("failed in reading gpio-req-tbl-num, rc = %d\n", rc);
+		CAM_ERR(CAM_UTIL, "failed in reading gpio-req-tbl-num, rc = %d",
+			rc);
 		goto free_gpio_req_tbl;
 	}
 
 	for (i = 0; i < count; i++) {
 		if (val_array[i] >= gpio_array_size) {
-			pr_err("gpio req tbl index %d invalid\n", val_array[i]);
+			CAM_ERR(CAM_UTIL, "gpio req tbl index %d invalid",
+				val_array[i]);
 			goto free_gpio_req_tbl;
 		}
 		gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
-		CDBG("cam_gpio_req_tbl[%d].gpio = %d\n", i,
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].gpio = %d", i,
 			gconf->cam_gpio_req_tbl[i].gpio);
 	}
 
 	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags",
 		val_array, count);
 	if (rc) {
-		pr_err("Failed in gpio-req-tbl-flags, rc %d\n", rc);
+		CAM_ERR(CAM_UTIL, "Failed in gpio-req-tbl-flags, rc %d", rc);
 		goto free_gpio_req_tbl;
 	}
 
 	for (i = 0; i < count; i++) {
 		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
-		CDBG("cam_gpio_req_tbl[%d].flags = %ld\n", i,
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].flags = %ld", i,
 			gconf->cam_gpio_req_tbl[i].flags);
 	}
 
@@ -499,10 +512,10 @@
 			"gpio-req-tbl-label", i,
 			&gconf->cam_gpio_req_tbl[i].label);
 		if (rc) {
-			pr_err("Failed rc %d\n", rc);
+			CAM_ERR(CAM_UTIL, "Failed rc %d", rc);
 			goto free_gpio_req_tbl;
 		}
-		CDBG("cam_gpio_req_tbl[%d].label = %s\n", i,
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].label = %s", i,
 			gconf->cam_gpio_req_tbl[i].label);
 	}
 
@@ -536,7 +549,7 @@
 
 	/* Validate input parameters */
 	if (!of_node) {
-		pr_err("Invalid param of_node\n");
+		CAM_ERR(CAM_UTIL, "Invalid param of_node");
 		return -EINVAL;
 	}
 
@@ -545,7 +558,7 @@
 	if (gpio_array_size <= 0)
 		return 0;
 
-	CDBG("gpio count %d\n", gpio_array_size);
+	CAM_DBG(CAM_UTIL, "gpio count %d", gpio_array_size);
 
 	gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
 	if (!gpio_array)
@@ -553,7 +566,7 @@
 
 	for (i = 0; i < gpio_array_size; i++) {
 		gpio_array[i] = of_get_gpio(of_node, i);
-		CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
+		CAM_DBG(CAM_UTIL, "gpio_array[%d] = %d", i, gpio_array[i]);
 	}
 
 	gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
@@ -563,7 +576,7 @@
 	rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
 		gpio_array_size);
 	if (rc) {
-		pr_err("failed in msm_camera_get_dt_gpio_req_tbl\n");
+		CAM_ERR(CAM_UTIL, "failed in msm_camera_get_dt_gpio_req_tbl");
 		goto free_gpio_array;
 	}
 
@@ -603,23 +616,23 @@
 
 
 	if (!gpio_conf) {
-		CDBG("No GPIO entry\n");
+		CAM_DBG(CAM_UTIL, "No GPIO entry");
 		return 0;
 	}
 	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
-		pr_err("GPIO table size is invalid\n");
+		CAM_ERR(CAM_UTIL, "GPIO table size is invalid");
 		return -EINVAL;
 	}
 	size = gpio_conf->cam_gpio_req_tbl_size;
 	gpio_tbl = gpio_conf->cam_gpio_req_tbl;
 
 	if (!gpio_tbl || !size) {
-		pr_err("Invalid gpio_tbl %pK / size %d\n",
+		CAM_ERR(CAM_UTIL, "Invalid gpio_tbl %pK / size %d",
 			gpio_tbl, size);
 		return -EINVAL;
 	}
 	for (i = 0; i < size; i++) {
-		CDBG("i=%d, gpio=%d dir=%ld\n", i,
+		CAM_DBG(CAM_UTIL, "i=%d, gpio=%d dir=%ld", i,
 			gpio_tbl[i].gpio, gpio_tbl[i].flags);
 	}
 	if (gpio_en) {
@@ -632,7 +645,7 @@
 				 * apply new gpios, outout a error message
 				 * for driver bringup debug
 				 */
-				pr_err("gpio %d:%s request fails\n",
+				CAM_ERR(CAM_UTIL, "gpio %d:%s request fails",
 					gpio_tbl[i].gpio, gpio_tbl[i].label);
 			}
 		}
@@ -651,7 +664,7 @@
 	struct platform_device *pdev = NULL;
 
 	if (!soc_info || !soc_info->pdev) {
-		pr_err("Invalid parameters\n");
+		CAM_ERR(CAM_UTIL, "Invalid parameters");
 		return -EINVAL;
 	}
 
@@ -662,7 +675,7 @@
 	count = of_property_count_strings(of_node, "regulator-names");
 	if (count != -EINVAL) {
 		if (count <= 0) {
-			pr_err("no regulators found\n");
+			CAM_ERR(CAM_UTIL, "no regulators found");
 			count = 0;
 			return -EINVAL;
 		}
@@ -670,22 +683,23 @@
 		soc_info->num_rgltr = count;
 
 	} else {
-		CDBG("No regulators node found\n");
+		CAM_DBG(CAM_UTIL, "No regulators node found");
 		return 0;
 	}
 
 	for (i = 0; i < soc_info->num_rgltr; i++) {
 		rc = of_property_read_string_index(of_node,
 			"regulator-names", i, &soc_info->rgltr_name[i]);
-		CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
+		CAM_DBG(CAM_UTIL, "rgltr_name[%d] = %s",
+			i, soc_info->rgltr_name[i]);
 		if (rc) {
-			pr_err("no regulator resource at cnt=%d\n", i);
+			CAM_ERR(CAM_UTIL, "no regulator resource at cnt=%d", i);
 			return -ENODEV;
 		}
 	}
 
 	if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
-		CDBG("No regulator control parameter defined\n");
+		CAM_DBG(CAM_UTIL, "No regulator control parameter defined");
 		soc_info->rgltr_ctrl_support = false;
 		return 0;
 	}
@@ -695,21 +709,21 @@
 	rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
 		soc_info->rgltr_min_volt, soc_info->num_rgltr);
 	if (rc) {
-		pr_err("No minimum volatage value found, rc=%d\n", rc);
+		CAM_ERR(CAM_UTIL, "No minimum volatage value found, rc=%d", rc);
 		return -EINVAL;
 	}
 
 	rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
 		soc_info->rgltr_max_volt, soc_info->num_rgltr);
 	if (rc) {
-		pr_err("No maximum volatage value found, rc=%d\n", rc);
+		CAM_ERR(CAM_UTIL, "No maximum volatage value found, rc=%d", rc);
 		return -EINVAL;
 	}
 
 	rc = of_property_read_u32_array(of_node, "rgltr-load-current",
 		soc_info->rgltr_op_mode, soc_info->num_rgltr);
 	if (rc) {
-		pr_err("No Load curent found rc=%d\n", rc);
+		CAM_ERR(CAM_UTIL, "No Load curent found rc=%d", rc);
 		return -EINVAL;
 	}
 
@@ -730,12 +744,13 @@
 
 	rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
 	if (rc) {
-		pr_err("device %s failed to read cell-index\n", pdev->name);
+		CAM_ERR(CAM_UTIL, "device %s failed to read cell-index",
+			pdev->name);
 		return rc;
 	}
 	count = of_property_count_strings(of_node, "reg-names");
 	if (count <= 0) {
-		pr_err("no reg-names found\n");
+		CAM_ERR(CAM_UTIL, "no reg-names found");
 		count = 0;
 	}
 	soc_info->num_mem_block = count;
@@ -744,7 +759,7 @@
 		rc = of_property_read_string_index(of_node, "reg-names", i,
 			&soc_info->mem_block_name[i]);
 		if (rc) {
-			pr_err("failed to read reg-names at %d\n", i);
+			CAM_ERR(CAM_UTIL, "failed to read reg-names at %d", i);
 			return rc;
 		}
 		soc_info->mem_block[i] =
@@ -752,7 +767,7 @@
 			soc_info->mem_block_name[i]);
 
 		if (!soc_info->mem_block[i]) {
-			pr_err("no mem resource by name %s\n",
+			CAM_ERR(CAM_UTIL, "no mem resource by name %s",
 				soc_info->mem_block_name[i]);
 			rc = -ENODEV;
 			return rc;
@@ -763,7 +778,7 @@
 		rc = of_property_read_u32_array(of_node, "reg-cam-base",
 			soc_info->mem_block_cam_base, soc_info->num_mem_block);
 		if (rc) {
-			pr_err("Error reading register offsets\n");
+			CAM_ERR(CAM_UTIL, "Error reading register offsets");
 			return rc;
 		}
 	}
@@ -771,13 +786,13 @@
 	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
 		&soc_info->irq_name);
 	if (rc) {
-		pr_warn("No interrupt line present\n");
+		CAM_WARN(CAM_UTIL, "No interrupt line present");
 		rc = 0;
 	} else {
 		soc_info->irq_line = platform_get_resource_byname(pdev,
 			IORESOURCE_IRQ, soc_info->irq_name);
 		if (!soc_info->irq_line) {
-			pr_err("no irq resource\n");
+			CAM_ERR(CAM_UTIL, "no irq resource");
 			rc = -ENODEV;
 			return rc;
 		}
@@ -817,7 +832,7 @@
 	if (IS_ERR_OR_NULL(*reg)) {
 		rc = PTR_ERR(*reg);
 		rc = rc ? rc : -EINVAL;
-		pr_err("Regulator %s get failed %d\n", rgltr_name, rc);
+		CAM_ERR(CAM_UTIL, "Regulator %s get failed %d", rgltr_name, rc);
 		*reg = NULL;
 	}
 	return rc;
@@ -831,13 +846,13 @@
 	int32_t rc = 0;
 
 	if (!rgltr) {
-		pr_err("Invalid NULL parameter\n");
+		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
 		return -EINVAL;
 	}
 
 	rc = regulator_disable(rgltr);
 	if (rc) {
-		pr_err("%s regulator disable failed\n", rgltr_name);
+		CAM_ERR(CAM_UTIL, "%s regulator disable failed", rgltr_name);
 		return rc;
 	}
 
@@ -864,31 +879,32 @@
 	int32_t rc = 0;
 
 	if (!rgltr) {
-		pr_err("Invalid NULL parameter\n");
+		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
 		return -EINVAL;
 	}
 
 	if (regulator_count_voltages(rgltr) > 0) {
-		CDBG("voltage min=%d, max=%d\n",
+		CAM_DBG(CAM_UTIL, "voltage min=%d, max=%d",
 			rgltr_min_volt, rgltr_max_volt);
 
 		rc = regulator_set_voltage(
 			rgltr, rgltr_min_volt, rgltr_max_volt);
 		if (rc) {
-			pr_err("%s set voltage failed\n", rgltr_name);
+			CAM_ERR(CAM_UTIL, "%s set voltage failed", rgltr_name);
 			return rc;
 		}
 
 		rc = regulator_set_load(rgltr, rgltr_op_mode);
 		if (rc) {
-			pr_err("%s set optimum mode failed\n", rgltr_name);
+			CAM_ERR(CAM_UTIL, "%s set optimum mode failed",
+				rgltr_name);
 			return rc;
 		}
 	}
 
 	rc = regulator_enable(rgltr);
 	if (rc) {
-		pr_err("%s regulator_enable failed\n", rgltr_name);
+		CAM_ERR(CAM_UTIL, "%s regulator_enable failed", rgltr_name);
 		return rc;
 	}
 
@@ -909,7 +925,7 @@
 
 	device_pctrl->pinctrl = devm_pinctrl_get(dev);
 	if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
-		CDBG("Pinctrl not available\n");
+		CAM_DBG(CAM_UTIL, "Pinctrl not available");
 		device_pctrl->pinctrl = NULL;
 		return 0;
 	}
@@ -917,7 +933,8 @@
 		pinctrl_lookup_state(device_pctrl->pinctrl,
 				CAM_SOC_PINCTRL_STATE_DEFAULT);
 	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_active)) {
-		pr_err("Failed to get the active state pinctrl handle\n");
+		CAM_ERR(CAM_UTIL,
+			"Failed to get the active state pinctrl handle");
 		device_pctrl->gpio_state_active = NULL;
 		return -EINVAL;
 	}
@@ -925,7 +942,8 @@
 		= pinctrl_lookup_state(device_pctrl->pinctrl,
 				CAM_SOC_PINCTRL_STATE_SLEEP);
 	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_suspend)) {
-		pr_err("Failed to get the suspend state pinctrl handle\n");
+		CAM_ERR(CAM_UTIL,
+			"Failed to get the suspend state pinctrl handle");
 		device_pctrl->gpio_state_suspend = NULL;
 		return -EINVAL;
 	}
@@ -973,7 +991,8 @@
 		}
 
 		if (rc) {
-			pr_err("%s enable failed\n", soc_info->rgltr_name[j]);
+			CAM_ERR(CAM_UTIL, "%s enable failed",
+				soc_info->rgltr_name[j]);
 			goto disable_rgltr;
 		}
 	}
@@ -1007,7 +1026,7 @@
 
 
 	if (!soc_info || !soc_info->pdev) {
-		pr_err("Invalid parameters\n");
+		CAM_ERR(CAM_UTIL, "Invalid parameters");
 		return -EINVAL;
 	}
 
@@ -1018,7 +1037,8 @@
 			if (!request_mem_region(soc_info->mem_block[i]->start,
 				resource_size(soc_info->mem_block[i]),
 				soc_info->mem_block_name[i])){
-				pr_err("Error Mem Region request Failed:%s\n",
+				CAM_ERR(CAM_UTIL,
+					"Error Mem Region request Failed:%s",
 					soc_info->mem_block_name[i]);
 				rc = -ENOMEM;
 				goto unmap_base;
@@ -1028,7 +1048,7 @@
 			soc_info->mem_block[i]->start,
 			resource_size(soc_info->mem_block[i]));
 		if (!soc_info->reg_map[i].mem_base) {
-			pr_err("i= %d base NULL\n", i);
+			CAM_ERR(CAM_UTIL, "i= %d base NULL", i);
 			rc = -ENOMEM;
 			goto unmap_base;
 		}
@@ -1041,7 +1061,7 @@
 
 	for (i = 0; i < soc_info->num_rgltr; i++) {
 		if (soc_info->rgltr_name[i] == NULL) {
-			pr_err("can't find regulator name\n");
+			CAM_ERR(CAM_UTIL, "can't find regulator name");
 			goto put_regulator;
 		}
 
@@ -1056,7 +1076,7 @@
 			handler, IRQF_TRIGGER_RISING,
 			soc_info->irq_name, irq_data);
 		if (rc) {
-			pr_err("irq request fail\n");
+			CAM_ERR(CAM_UTIL, "irq request fail");
 			rc = -EBUSY;
 			goto put_regulator;
 		}
@@ -1069,7 +1089,8 @@
 		soc_info->clk[i] = clk_get(&soc_info->pdev->dev,
 			soc_info->clk_name[i]);
 		if (!soc_info->clk[i]) {
-			pr_err("get failed for %s\n", soc_info->clk_name[i]);
+			CAM_ERR(CAM_UTIL, "get failed for %s",
+				soc_info->clk_name[i]);
 			rc = -ENOENT;
 			goto put_clk;
 		}
@@ -1077,11 +1098,11 @@
 
 	rc = cam_soc_util_request_pinctrl(soc_info);
 	if (rc)
-		CDBG("Failed in request pinctrl, rc=%d\n", rc);
+		CAM_DBG(CAM_UTIL, "Failed in request pinctrl, rc=%d", rc);
 
 	rc = cam_soc_util_request_gpio_table(soc_info, true);
 	if (rc) {
-		pr_err("Failed in request gpio table, rc=%d\n", rc);
+		CAM_ERR(CAM_UTIL, "Failed in request gpio table, rc=%d", rc);
 		goto put_clk;
 	}
 
@@ -1135,7 +1156,7 @@
 	struct platform_device *pdev = NULL;
 
 	if (!soc_info || !soc_info->pdev) {
-		pr_err("Invalid parameter\n");
+		CAM_ERR(CAM_UTIL, "Invalid parameter");
 		return -EINVAL;
 	}
 
@@ -1186,7 +1207,7 @@
 
 	rc = cam_soc_util_regulator_enable_default(soc_info);
 	if (rc) {
-		pr_err("Regulators enable failed\n");
+		CAM_ERR(CAM_UTIL, "Regulators enable failed");
 		return rc;
 	}
 
@@ -1257,7 +1278,7 @@
 {
 	void __iomem     *base_addr = NULL;
 
-	CDBG("base_idx %u size=%d\n", base_index, size);
+	CAM_DBG(CAM_UTIL, "base_idx %u size=%d", base_index, size);
 
 	if (!soc_info || base_index >= soc_info->num_reg_map ||
 		size <= 0 || (offset + size) >=
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
index f233799..2e9e61f 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
@@ -23,6 +23,8 @@
 
 #include <linux/tracepoint.h>
 #include <media/cam_req_mgr.h>
+#include "cam_req_mgr_core.h"
+#include "cam_req_mgr_interface.h"
 #include "cam_context.h"
 
 TRACE_EVENT(cam_context_state,
@@ -107,19 +109,112 @@
 );
 
 TRACE_EVENT(cam_flush_req,
-	TP_PROTO(struct cam_req_mgr_flush_info *info),
-	TP_ARGS(info),
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_flush_info *info),
+	TP_ARGS(link, info),
 	TP_STRUCT__entry(
 		__field(uint32_t, type)
 		__field(int64_t, req_id)
+		__field(void*, link)
+		__field(void*, session)
 	),
 	TP_fast_assign(
-		__entry->type   = info->flush_type;
-		__entry->req_id = info->req_id;
+		__entry->type    = info->flush_type;
+		__entry->req_id  = info->req_id;
+		__entry->link    = link;
+		__entry->session = link->parent;
 	),
 	TP_printk(
-		"FlushRequest type=%u request=%llu",
-			__entry->type, __entry->req_id
+		"FlushRequest type=%u request=%llu link=%pK session=%pK",
+			__entry->type, __entry->req_id, __entry->link,
+			__entry->session
+	)
+);
+
+TRACE_EVENT(cam_req_mgr_connect_device,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_device_info *info),
+	TP_ARGS(link, info),
+	TP_STRUCT__entry(
+		__string(name, info->name)
+		__field(uint32_t, id)
+		__field(uint32_t, delay)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, info->name);
+		__entry->id      = info->dev_id;
+		__entry->delay   = info->p_delay;
+		__entry->link    = link;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"ReqMgr Connect name=%s id=%u pd=%d link=%pK session=%pK",
+			__get_str(name), __entry->id, __entry->delay,
+			__entry->link, __entry->session
+	)
+);
+
+TRACE_EVENT(cam_req_mgr_apply_request,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_apply_request *req,
+		struct cam_req_mgr_connected_device *dev),
+	TP_ARGS(link, req, dev),
+	TP_STRUCT__entry(
+		__string(name, dev->dev_info.name)
+		__field(uint32_t, dev_id)
+		__field(uint64_t, req_id)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev->dev_info.name);
+		__entry->dev_id  = dev->dev_info.dev_id;
+		__entry->req_id  = req->request_id;
+		__entry->link    = link;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"ReqMgr ApplyRequest devname=%s devid=%u request=%lld link=%pK session=%pK",
+			__get_str(name), __entry->dev_id, __entry->req_id,
+			__entry->link, __entry->session
+	)
+);
+
+TRACE_EVENT(cam_req_mgr_add_req,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		int idx, struct cam_req_mgr_add_request *add_req,
+		struct cam_req_mgr_req_tbl *tbl,
+		struct cam_req_mgr_connected_device *dev),
+	TP_ARGS(link, idx, add_req, tbl, dev),
+	TP_STRUCT__entry(
+		__string(name, dev->dev_info.name)
+		__field(uint32_t, dev_id)
+		__field(uint64_t, req_id)
+		__field(uint32_t, slot_id)
+		__field(uint32_t, delay)
+		__field(uint32_t, readymap)
+		__field(uint32_t, devicemap)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev->dev_info.name);
+		__entry->dev_id    = dev->dev_info.dev_id;
+		__entry->req_id    = add_req->req_id;
+		__entry->slot_id   = idx;
+		__entry->delay     = tbl->pd;
+		__entry->readymap  = tbl->slot[idx].req_ready_map;
+		__entry->devicemap = tbl->dev_mask;
+		__entry->link      = link;
+		__entry->session   = link->parent;
+	),
+	TP_printk(
+		"ReqMgr AddRequest devname=%s devid=%d request=%lld slot=%d pd=%d readymap=%x devicemap=%d link=%pk session=%pK",
+			__get_str(name), __entry->dev_id, __entry->req_id,
+			__entry->slot_id, __entry->delay, __entry->readymap,
+			__entry->devicemap, __entry->link, __entry->session
 	)
 );
 #endif /* _CAM_TRACE_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index 3e686e9..da36e38 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -1136,6 +1136,9 @@
 	if (sscanf(buf, "%5x %x", &off, &cnt) < 2)
 		return -EINVAL;
 
+	if (off % sizeof(u32))
+		return -EINVAL;
+
 	if (off > dbg->max_offset)
 		return -EINVAL;
 
@@ -1204,6 +1207,9 @@
 	if (cnt < 2)
 		return -EFAULT;
 
+	if (off % sizeof(u32))
+		return -EFAULT;
+
 	if (off >= dbg->max_offset)
 		return -EFAULT;
 
@@ -1252,6 +1258,9 @@
 			goto debug_read_error;
 		}
 
+		if (dbg->off % sizeof(u32))
+			return -EFAULT;
+
 		ptr = dbg->base + dbg->off;
 		tot = 0;
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index d300de2..8727535 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -486,7 +486,7 @@
 			struct sde_rotator_vbinfo *vbinfo =
 					&ctx->vbinfo_cap[i];
 
-			if (vbinfo->fence && vbinfo->fd < 0) {
+			if (vbinfo->fence) {
 				/* fence is not used */
 				SDEDEV_DBG(rot_dev->dev,
 						"put fence s:%d t:%d i:%d\n",
@@ -2158,7 +2158,7 @@
 			&& (buf->index < ctx->nbuf_cap)) {
 		int idx = buf->index;
 
-		if (ctx->vbinfo_cap[idx].fence && ctx->vbinfo_cap[idx].fd < 0) {
+		if (ctx->vbinfo_cap[idx].fence) {
 			/* fence is not used */
 			SDEDEV_DBG(ctx->rot_dev->dev, "put fence s:%d i:%d\n",
 					ctx->session_id, idx);
@@ -2487,6 +2487,7 @@
 	struct msm_sde_rotator_fence *fence = arg;
 	struct msm_sde_rotator_comp_ratio *comp_ratio = arg;
 	struct sde_rotator_vbinfo *vbinfo;
+	int ret;
 
 	switch (cmd) {
 	case VIDIOC_S_SDE_ROTATOR_FENCE:
@@ -2545,17 +2546,37 @@
 
 		vbinfo = &ctx->vbinfo_cap[fence->index];
 
-		if (vbinfo->fence == NULL) {
-			vbinfo->fd = -1;
-		} else {
-			vbinfo->fd =
-				sde_rotator_get_sync_fence_fd(vbinfo->fence);
-			if (vbinfo->fd < 0) {
+		if (!vbinfo)
+			return -EINVAL;
+
+		if (vbinfo->fence) {
+			ret = sde_rotator_get_sync_fence_fd(vbinfo->fence);
+			if (ret < 0) {
 				SDEDEV_ERR(rot_dev->dev,
-					"fail get fence fd s:%d\n",
-					ctx->session_id);
-				return vbinfo->fd;
+						"fail get fence fd s:%d\n",
+						ctx->session_id);
+				return ret;
 			}
+
+			/*
+			 * Lose any reference to the sync fence once we pass
+			 * it to the user. The driver does not clean up fence
+			 * descriptors the user leaves unclosed.
+			 */
+			vbinfo->fence = NULL;
+
+			/*
+			 * Cache the fence descriptor in case the user calls
+			 * this ioctl again. The cached value goes stale if
+			 * the user duplicated and closed the old descriptor.
+			 */
+			vbinfo->fd = ret;
+		} else if (!sde_rotator_get_fd_sync_fence(vbinfo->fd)) {
+			/*
+			 * The user has closed the cached fence descriptor.
+			 * Invalidate the descriptor cache.
+			 */
+			vbinfo->fd = -1;
 		}
 		fence->fd = vbinfo->fd;
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index 573e0a8..27e9ba6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -783,10 +783,15 @@
 	if (!fmt_found) {
 		for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
 			fmt = &sde_mdp_format_ubwc_map[i].mdp_format;
-			if (format == fmt->format)
+			if (format == fmt->format) {
+				fmt_found = true;
 				break;
+			}
 		}
 	}
+	/* If the format is not supported, return NULL */
+	if (!fmt_found)
+		fmt = NULL;
 
 	return fmt;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 8c63469..54766a2 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -347,6 +347,14 @@
 	}
 
 	INIT_DELAYED_WORK(&core->fw_unload_work, msm_vidc_fw_unload_handler);
+
+	mutex_lock(&core->lock);
+	core->vote_data = kcalloc(MAX_SUPPORTED_INSTANCES,
+		sizeof(*core->vote_data), GFP_KERNEL);
+	if (!core->vote_data)
+		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
+	mutex_unlock(&core->lock);
+
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 5c587e2..bc4b280 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1553,23 +1553,25 @@
 			break;
 		}
 
-		buff_req_buffer = get_buff_req_buffer(inst,
-			HAL_BUFFER_EXTRADATA_INPUT);
-
 		extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes);
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+			buff_req_buffer = get_buff_req_buffer(inst,
+						HAL_BUFFER_EXTRADATA_INPUT);
 
-		inst->bufq[OUTPUT_PORT].plane_sizes[extra_idx] =
-			buff_req_buffer ?
-			buff_req_buffer->buffer_size : 0;
-
-		buff_req_buffer = get_buff_req_buffer(inst,
-			HAL_BUFFER_EXTRADATA_OUTPUT);
+			inst->bufq[OUTPUT_PORT].plane_sizes[extra_idx] =
+					buff_req_buffer ?
+					buff_req_buffer->buffer_size : 0;
+		}
 
 		extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
-		inst->bufq[CAPTURE_PORT].plane_sizes[extra_idx] =
-			buff_req_buffer ?
-			buff_req_buffer->buffer_size : 0;
+		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
+			buff_req_buffer = get_buff_req_buffer(inst,
+						HAL_BUFFER_EXTRADATA_OUTPUT);
 
+			inst->bufq[CAPTURE_PORT].plane_sizes[extra_idx] =
+				buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+		}
 		property_id = 0;
 		}
 		break;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 427568c..1b931ee 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1116,7 +1116,7 @@
 
 	q->mem_ops = &msm_vidc_vb2_mem_ops;
 	q->drv_priv = inst;
-	q->allow_zero_bytesused = 1;
+	q->allow_zero_bytesused = !V4L2_TYPE_IS_OUTPUT(type);
 	q->copy_timestamp = 1;
 	return vb2_queue_init(q);
 }
@@ -1499,6 +1499,7 @@
 	INIT_MSM_VIDC_LIST(&inst->outputbufs);
 	INIT_MSM_VIDC_LIST(&inst->registeredbufs);
 	INIT_MSM_VIDC_LIST(&inst->reconbufs);
+	INIT_MSM_VIDC_LIST(&inst->eosbufs);
 
 	kref_init(&inst->kref);
 
@@ -1603,6 +1604,7 @@
 	DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
 	DEINIT_MSM_VIDC_LIST(&inst->outputbufs);
 	DEINIT_MSM_VIDC_LIST(&inst->registeredbufs);
+	DEINIT_MSM_VIDC_LIST(&inst->eosbufs);
 
 	kfree(inst);
 	inst = NULL;
@@ -1650,6 +1652,8 @@
 	 */
 	msm_comm_validate_output_buffers(inst);
 
+	msm_comm_release_eos_buffers(inst);
+
 	if (msm_comm_release_output_buffers(inst, true))
 		dprintk(VIDC_ERR,
 			"Failed to release output buffers\n");
@@ -1694,6 +1698,7 @@
 	DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq);
 	DEINIT_MSM_VIDC_LIST(&inst->outputbufs);
 	DEINIT_MSM_VIDC_LIST(&inst->registeredbufs);
+	DEINIT_MSM_VIDC_LIST(&inst->eosbufs);
 
 	mutex_destroy(&inst->sync_lock);
 	mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 4327309..6b09a54 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -139,15 +139,16 @@
 
 	hdev = core->device;
 
+	mutex_lock(&core->lock);
 	vote_data = core->vote_data;
 	if (!vote_data) {
 		dprintk(VIDC_PROF,
 			"Failed to get vote_data for inst %pK\n",
 				inst);
+		mutex_unlock(&core->lock);
 		return -EINVAL;
 	}
 
-	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
 		int codec = 0;
 		struct msm_vidc_buffer *temp, *next;
@@ -157,6 +158,7 @@
 		if (!inst) {
 			dprintk(VIDC_ERR, "%s Invalid args\n",
 				__func__);
+			mutex_unlock(&core->lock);
 			return -EINVAL;
 		}
 
@@ -227,7 +229,7 @@
 		vote_data[i].work_mode = inst->clk_data.work_mode;
 		fill_recon_stats(inst, &vote_data[i]);
 
-		if (core->resources.sys_cache_enabled)
+		if (core->resources.sys_cache_res_set)
 			vote_data[i].use_sys_cache = true;
 
 		i++;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 2ce0c3d..d47f95b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -35,8 +35,6 @@
 		V4L2_EVENT_MSM_VIDC_RELEASE_BUFFER_REFERENCE
 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
 
-#define MAX_SUPPORTED_INSTANCES 16
-
 const char *const mpeg_video_vidc_extradata[] = {
 	"Extradata none",
 	"Extradata MB Quantization",
@@ -956,8 +954,8 @@
 	/* This should come from sys_init_done */
 	core->resources.max_inst_count =
 		sys_init_msg->max_sessions_supported ?
-		sys_init_msg->max_sessions_supported :
-		MAX_SUPPORTED_INSTANCES;
+		min_t(u32, sys_init_msg->max_sessions_supported,
+		MAX_SUPPORTED_INSTANCES) : MAX_SUPPORTED_INSTANCES;
 
 	core->resources.max_secure_inst_count =
 		core->resources.max_secure_inst_count ?
@@ -978,11 +976,6 @@
 		__func__, core->codec_count, core->enc_codec_supported,
 		core->dec_codec_supported);
 
-	core->vote_data = kcalloc(MAX_SUPPORTED_INSTANCES,
-		sizeof(core->vote_data), GFP_KERNEL);
-	if (!core->vote_data)
-		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
-
 	complete(&(core->completions[index]));
 }
 
@@ -2179,6 +2172,26 @@
 	return 0;
 }
 
+static bool is_eos_buffer(struct msm_vidc_inst *inst, u32 device_addr)
+{
+	struct eos_buf *temp, *next;
+	bool found = false;
+
+	mutex_lock(&inst->eosbufs.lock);
+	list_for_each_entry_safe(temp, next, &inst->eosbufs.list, list) {
+		if (temp->smem.device_addr == device_addr) {
+			found = true;
+			list_del(&temp->list);
+			msm_comm_smem_free(inst, &temp->smem);
+			kfree(temp);
+			break;
+		}
+	}
+	mutex_unlock(&inst->eosbufs.lock);
+
+	return found;
+}
+
 static void handle_ebd(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_cb_data_done *response = data;
@@ -2203,6 +2216,13 @@
 	}
 
 	empty_buf_done = (struct vidc_hal_ebd *)&response->input_done;
+	/* If this is an internal EOS buffer, handle it in the driver */
+	if (is_eos_buffer(inst, empty_buf_done->packet_buffer)) {
+		dprintk(VIDC_DBG, "Received EOS buffer %pK\n",
+			(void *)(u64)empty_buf_done->packet_buffer);
+		goto exit;
+	}
+
 	planes[0] = empty_buf_done->packet_buffer;
 	planes[1] = empty_buf_done->extra_data_buffer;
 
@@ -3666,6 +3686,64 @@
 		rc = msm_comm_session_continue(inst);
 		break;
 	}
+	case V4L2_DEC_CMD_STOP:
+	{
+		struct vidc_frame_data data = {0};
+		struct hfi_device *hdev;
+		struct eos_buf *binfo;
+		u32 smem_flags = 0;
+
+		get_inst(inst->core, inst);
+		if (inst->session_type != MSM_VIDC_DECODER) {
+			dprintk(VIDC_DBG,
+				"Non-Decoder session. DEC_STOP is not valid\n");
+			rc = -EINVAL;
+			goto exit;
+		}
+
+		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+		if (!binfo) {
+			dprintk(VIDC_ERR, "%s: Out of memory\n", __func__);
+			rc = -ENOMEM;
+			goto exit;
+		}
+
+		if (inst->flags & VIDC_SECURE)
+			smem_flags |= SMEM_SECURE;
+
+		rc = msm_comm_smem_alloc(inst,
+				SZ_4K, 1, smem_flags,
+				HAL_BUFFER_INPUT, 0, &binfo->smem);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to allocate output memory\n");
+			goto exit;
+		}
+
+		mutex_lock(&inst->eosbufs.lock);
+		list_add_tail(&binfo->list, &inst->eosbufs.list);
+		mutex_unlock(&inst->eosbufs.lock);
+
+		data.alloc_len = binfo->smem.size;
+		data.device_addr = binfo->smem.device_addr;
+		data.clnt_data = data.device_addr;
+		data.buffer_type = HAL_BUFFER_INPUT;
+		data.filled_len = 0;
+		data.offset = 0;
+		data.flags = HAL_BUFFERFLAG_EOS;
+		data.timestamp = LLONG_MAX;
+		data.extradata_addr = data.device_addr;
+		data.extradata_size = 0;
+		dprintk(VIDC_DBG, "Queueing EOS buffer %pK\n",
+			(void *)(u64)data.device_addr);
+		hdev = inst->core->device;
+
+		rc = call_hfi_op(hdev, session_etb, inst->session,
+				&data);
+exit:
+		put_inst(inst);
+		break;
+	}
 	default:
 		dprintk(VIDC_ERR, "Unknown Command %d\n", which_cmd);
 		rc = -ENOTSUPP;
@@ -4047,9 +4125,14 @@
 		return -EINVAL;
 	}
 	extra_buffers = msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
-
 	bufreq->buffer_count_min_host = bufreq->buffer_count_min +
 		extra_buffers;
+	bufreq = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT);
+	if (bufreq) {
+		if (bufreq->buffer_count_min)
+			bufreq->buffer_count_min_host =
+				bufreq->buffer_count_min + extra_buffers;
+	}
 
 	if (msm_comm_get_stream_output_mode(inst) ==
 			HAL_VIDEO_DECODER_SECONDARY) {
@@ -4447,6 +4530,26 @@
 	return rc;
 }
 
+void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst)
+{
+	struct eos_buf *buf, *next;
+
+	if (!inst) {
+		dprintk(VIDC_ERR,
+			"Invalid instance pointer = %pK\n", inst);
+		return;
+	}
+
+	mutex_lock(&inst->eosbufs.lock);
+	list_for_each_entry_safe(buf, next, &inst->eosbufs.list, list) {
+		list_del(&buf->list);
+		kfree(buf);
+	}
+	INIT_LIST_HEAD(&inst->eosbufs.list);
+	mutex_unlock(&inst->eosbufs.lock);
+}
+
+
 int msm_comm_release_recon_buffers(struct msm_vidc_inst *inst)
 {
 	struct recon_buf *buf, *next;
@@ -5333,8 +5436,8 @@
 			}
 		} else if (inst->session_type == MSM_VIDC_ENCODER) {
 			if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-				if (!i) /* yuv */
-					skip = true;
+				/* yuv and extradata */
+				skip = true;
 			} else if (b->type ==
 					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 				if (!i) /* bitstream */
@@ -5913,8 +6016,11 @@
 		 * always compare dma_buf addresses which is guaranteed
 		 * to be same across the processes (duplicate fds).
 		 */
-		dma_planes[i] = (unsigned long)dma_buf_get(vb2->planes[i].m.fd);
-		dma_buf_put((struct dma_buf *)dma_planes[i]);
+		dma_planes[i] = (unsigned long)msm_smem_get_dma_buf(
+				vb2->planes[i].m.fd);
+		if (!dma_planes[i])
+			return NULL;
+		msm_smem_put_dma_buf((struct dma_buf *)dma_planes[i]);
 	}
 
 	mutex_lock(&inst->registeredbufs.lock);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index 18ba4a5..4a06f19 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -56,6 +56,7 @@
 					bool check_for_reuse);
 int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst);
 int msm_comm_release_recon_buffers(struct msm_vidc_inst *inst);
+void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst);
 int msm_comm_release_output_buffers(struct msm_vidc_inst *inst,
 	bool force_release);
 void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index e554a46..6511029 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -49,6 +49,7 @@
 #define MAX_NUM_OUTPUT_BUFFERS VIDEO_MAX_FRAME // same as VB2_MAX_FRAME
 #define MAX_NUM_CAPTURE_BUFFERS VIDEO_MAX_FRAME // same as VB2_MAX_FRAME
 
+#define MAX_SUPPORTED_INSTANCES 16
 
 /* Maintains the number of FTB's between each FBD over a window */
 #define DCVS_FTB_WINDOW 16
@@ -150,6 +151,11 @@
 	u32 CF;
 };
 
+struct eos_buf {
+	struct list_head list;
+	struct msm_smem smem;
+};
+
 struct internal_buf {
 	struct list_head list;
 	enum hal_buffer buffer_type;
@@ -326,6 +332,7 @@
 	struct msm_vidc_list pending_getpropq;
 	struct msm_vidc_list outputbufs;
 	struct msm_vidc_list reconbufs;
+	struct msm_vidc_list eosbufs;
 	struct msm_vidc_list registeredbufs;
 	struct buffer_requirements buff_req;
 	struct smem_client *mem_client;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 755f0c86..8888980 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -161,7 +161,7 @@
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
 	bool sys_cache_present;
-	bool sys_cache_enabled;
+	bool sys_cache_res_set;
 	struct subcache_set subcache_set;
 	struct reg_set reg_set;
 	struct addr_set qdss_addr_set;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index b430d14..2137a51 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -99,9 +99,10 @@
 static void __unload_fw(struct venus_hfi_device *device);
 static int __tzbsp_set_video_state(enum tzbsp_video_state state);
 static int __enable_subcaches(struct venus_hfi_device *device);
+static int __set_subcaches(struct venus_hfi_device *device);
+static int __release_subcaches(struct venus_hfi_device *device);
 static int __disable_subcaches(struct venus_hfi_device *device);
 
-
 /**
  * Utility function to enforce some of our assumptions.  Spam calls to this
  * in hotspots in code to double check some of the assumptions that we hold.
@@ -1747,10 +1748,8 @@
 		dprintk(VIDC_WARN, "Failed to send image version pkt to f/w\n");
 
 	rc = __enable_subcaches(device);
-	if (rc) {
-		dprintk(VIDC_WARN,
-			"Failed to enable subcaches, err = %d\n", rc);
-	}
+	if (!rc)
+		__set_subcaches(device);
 
 	if (dev->res->pm_qos_latency_us) {
 #ifdef CONFIG_SMP
@@ -2804,9 +2803,11 @@
 		}
 	}
 
+	__flush_debug_queue(device, device->raw_packet);
+
 	rc = __suspend(device);
 	if (rc)
-		dprintk(VIDC_ERR, "Failed venus power off\n");
+		dprintk(VIDC_ERR, "Failed __suspend\n");
 
 	/* Cancel pending delayed works if any */
 	cancel_delayed_work(&venus_hfi_pm_work);
@@ -3763,39 +3764,64 @@
 	int rc = 0;
 	u32 c = 0;
 	struct subcache_info *sinfo;
-	u32 resource[VIDC_MAX_SUBCACHE_SIZE];
-	struct hfi_resource_syscache_info_type *sc_res_info;
-	struct hfi_resource_subcache_type *sc_res;
-	struct vidc_resource_hdr rhdr;
 
 	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
-	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
-
-	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
-	sc_res = &(sc_res_info->rg_subcache_entries[0]);
-
 	/* Activate subcaches */
 	venus_hfi_for_each_subcache(device, sinfo) {
 		rc = llcc_slice_activate(sinfo->subcache);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to activate %s: %d\n",
 				sinfo->name, rc);
-			continue;
+			goto err_activate_fail;
 		}
 		sinfo->isactive = true;
-
-		/* Update the entry */
-		sc_res[c].size = sinfo->subcache->llcc_slice_size;
-		sc_res[c].sc_id = sinfo->subcache->llcc_slice_id;
-		dprintk(VIDC_DBG, "Activate subcache %s\n", sinfo->name);
+		dprintk(VIDC_DBG, "Activated subcache %s\n", sinfo->name);
 		c++;
 	}
 
+	dprintk(VIDC_DBG, "Activated %d Subcaches to Venus\n", c);
+
+	return 0;
+
+err_activate_fail:
+	__release_subcaches(device);
+	__disable_subcaches(device);
+	return -EINVAL;
+}
+
+static int __set_subcaches(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	u32 c = 0;
+	struct subcache_info *sinfo;
+	u32 resource[VIDC_MAX_SUBCACHE_SIZE];
+	struct hfi_resource_syscache_info_type *sc_res_info;
+	struct hfi_resource_subcache_type *sc_res;
+	struct vidc_resource_hdr rhdr;
+
+	if (device->res->sys_cache_res_set) {
+		dprintk(VIDC_DBG, "Subcaches already set to Venus\n");
+		return 0;
+	}
+
+	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
+
+	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
+	sc_res = &(sc_res_info->rg_subcache_entries[0]);
+
+	venus_hfi_for_each_subcache(device, sinfo) {
+		if (sinfo->isactive == true) {
+			sc_res[c].size = sinfo->subcache->llcc_slice_size;
+			sc_res[c].sc_id = sinfo->subcache->llcc_slice_id;
+			c++;
+		}
+	}
+
 	/* Set resource to Venus for activated subcaches */
 	if (c) {
-		dprintk(VIDC_DBG, "Setting Subcaches\n");
+		dprintk(VIDC_DBG, "Setting %d Subcaches\n", c);
 
 		rhdr.resource_handle = sc_res_info; /* cookie */
 		rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
@@ -3814,9 +3840,8 @@
 			sinfo->isset = true;
 	}
 
-	dprintk(VIDC_DBG, "Activated & Set Subcaches to Venus\n");
-
-	device->res->sys_cache_enabled = true;
+	dprintk(VIDC_DBG, "Set Subcaches done to Venus\n");
+	device->res->sys_cache_res_set = true;
 
 	return 0;
 
@@ -3826,7 +3851,7 @@
 	return rc;
 }
 
-static int __disable_subcaches(struct venus_hfi_device *device)
+static int __release_subcaches(struct venus_hfi_device *device)
 {
 	struct subcache_info *sinfo;
 	int rc = 0;
@@ -3839,8 +3864,6 @@
 	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
-	dprintk(VIDC_DBG, "Disabling Subcaches\n");
-
 	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
 
 	sc_res_info = (struct hfi_resource_syscache_info_type *)resource;
@@ -3858,16 +3881,29 @@
 	}
 
 	if (c > 0) {
+		dprintk(VIDC_DBG, "Releasing %d subcaches\n", c);
 		rhdr.resource_handle = sc_res_info; /* cookie */
 		rhdr.resource_id = VIDC_RESOURCE_SYSCACHE;
 
 		rc = __core_release_resource(device, &rhdr);
 		if (rc)
-			dprintk(VIDC_ERR, "Failed to release subcaches\n");
-
-		dprintk(VIDC_DBG, "Release %d subcaches\n", c);
+			dprintk(VIDC_ERR,
+				"Failed to release %d subcaches\n", c);
 	}
 
+	device->res->sys_cache_res_set = false;
+
+	return rc;
+}
+
+static int __disable_subcaches(struct venus_hfi_device *device)
+{
+	struct subcache_info *sinfo;
+	int rc = 0;
+
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
+		return 0;
+
 	/* De-activate subcaches */
 	venus_hfi_for_each_subcache_reverse(device, sinfo) {
 		if (sinfo->isactive == true) {
@@ -3883,8 +3919,6 @@
 		}
 	}
 
-	device->res->sys_cache_enabled = false;
-
 	return rc;
 }
 
@@ -3984,10 +4018,7 @@
 		return 0;
 	}
 
-	dprintk(VIDC_PROF, "Entering power collapse\n");
-
-	if (__disable_subcaches(device))
-		dprintk(VIDC_ERR, "Failed to disable subcaches\n");
+	dprintk(VIDC_PROF, "Entering suspend\n");
 
 	if (device->res->pm_qos_latency_us &&
 		pm_qos_request_active(&device->qos))
@@ -3999,8 +4030,10 @@
 		goto err_tzbsp_suspend;
 	}
 
+	__disable_subcaches(device);
+
 	__venus_power_off(device);
-	dprintk(VIDC_PROF, "Venus power collapsed\n");
+	dprintk(VIDC_PROF, "Venus power off\n");
 	return rc;
 
 err_tzbsp_suspend:
@@ -4061,10 +4094,8 @@
 	__sys_set_debug(device, msm_vidc_fw_debug);
 
 	rc = __enable_subcaches(device);
-	if (rc) {
-		dprintk(VIDC_WARN,
-			"Failed to enable subcaches, err = %d\n", rc);
-	}
+	if (!rc)
+		__set_subcaches(device);
 
 	dprintk(VIDC_PROF, "Resumed from power collapse\n");
 exit:
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 52dc794..1da2c94 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1099,10 +1099,10 @@
 			       struct s5p_jpeg_ctx *ctx)
 {
 	int c, components = 0, notfound, n_dht = 0, n_dqt = 0;
-	unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0,
-		     sof_len = 0;
-	unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER],
-		     dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
+	unsigned int height = 0, width = 0, word, subsampling = 0;
+	unsigned int sos = 0, sof = 0, sof_len = 0;
+	unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER];
+	unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
 	long length;
 	struct s5p_jpeg_buffer jpeg_buffer;
 
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 86cc70fe25..2d4b836 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1629,7 +1629,7 @@
 	if (kc == KEY_KEYBOARD && !ictx->release_code) {
 		ictx->last_keycode = kc;
 		if (!nomouse) {
-			ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
+			ictx->pad_mouse = !ictx->pad_mouse;
 			dev_dbg(dev, "toggling to %s mode\n",
 				ictx->pad_mouse ? "mouse" : "keyboard");
 			spin_unlock_irqrestore(&ictx->kc_lock, flags);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 283495c..aab8eee 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -320,7 +320,7 @@
 static int mxl111sf_i2c_send_data(struct mxl111sf_state *state,
 				  u8 index, u8 *wdata)
 {
-	int ret = mxl111sf_ctrl_msg(state->d, wdata[0],
+	int ret = mxl111sf_ctrl_msg(state, wdata[0],
 				    &wdata[1], 25, NULL, 0);
 	mxl_fail(ret);
 
@@ -330,7 +330,7 @@
 static int mxl111sf_i2c_get_data(struct mxl111sf_state *state,
 				 u8 index, u8 *wdata, u8 *rdata)
 {
-	int ret = mxl111sf_ctrl_msg(state->d, wdata[0],
+	int ret = mxl111sf_ctrl_msg(state, wdata[0],
 				    &wdata[1], 25, rdata, 24);
 	mxl_fail(ret);
 
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 5d676b5..f1f4486 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -24,9 +24,6 @@
 #include "lgdt3305.h"
 #include "lg2160.h"
 
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE  64
-
 int dvb_usb_mxl111sf_debug;
 module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
 MODULE_PARM_DESC(debug, "set debugging level "
@@ -56,27 +53,34 @@
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
-int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+int mxl111sf_ctrl_msg(struct mxl111sf_state *state,
 		      u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
 {
+	struct dvb_usb_device *d = state->d;
 	int wo = (rbuf == NULL || rlen == 0); /* write-only */
 	int ret;
-	u8 sndbuf[MAX_XFER_SIZE];
 
-	if (1 + wlen > sizeof(sndbuf)) {
+	if (1 + wlen > MXL_MAX_XFER_SIZE) {
 		pr_warn("%s: len=%d is too big!\n", __func__, wlen);
 		return -EOPNOTSUPP;
 	}
 
 	pr_debug("%s(wlen = %d, rlen = %d)\n", __func__, wlen, rlen);
 
-	memset(sndbuf, 0, 1+wlen);
+	mutex_lock(&state->msg_lock);
+	memset(state->sndbuf, 0, 1+wlen);
+	memset(state->rcvbuf, 0, rlen);
 
-	sndbuf[0] = cmd;
-	memcpy(&sndbuf[1], wbuf, wlen);
+	state->sndbuf[0] = cmd;
+	memcpy(&state->sndbuf[1], wbuf, wlen);
 
-	ret = (wo) ? dvb_usbv2_generic_write(d, sndbuf, 1+wlen) :
-		dvb_usbv2_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen);
+	ret = (wo) ? dvb_usbv2_generic_write(d, state->sndbuf, 1+wlen) :
+		dvb_usbv2_generic_rw(d, state->sndbuf, 1+wlen, state->rcvbuf,
+				     rlen);
+
+	memcpy(rbuf, state->rcvbuf, rlen);
+	mutex_unlock(&state->msg_lock);
+
 	mxl_fail(ret);
 
 	return ret;
@@ -92,7 +96,7 @@
 	u8 buf[2];
 	int ret;
 
-	ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_READ, &addr, 1, buf, 2);
+	ret = mxl111sf_ctrl_msg(state, MXL_CMD_REG_READ, &addr, 1, buf, 2);
 	if (mxl_fail(ret)) {
 		mxl_debug("error reading reg: 0x%02x", addr);
 		goto fail;
@@ -118,7 +122,7 @@
 
 	pr_debug("W: (0x%02x, 0x%02x)\n", addr, data);
 
-	ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_WRITE, buf, 2, NULL, 0);
+	ret = mxl111sf_ctrl_msg(state, MXL_CMD_REG_WRITE, buf, 2, NULL, 0);
 	if (mxl_fail(ret))
 		pr_err("error writing reg: 0x%02x, val: 0x%02x", addr, data);
 	return ret;
@@ -922,6 +926,8 @@
 	static u8 eeprom[256];
 	struct i2c_client c;
 
+	mutex_init(&state->msg_lock);
+
 	ret = get_chip_info(state);
 	if (mxl_fail(ret))
 		pr_err("failed to get chip info during probe");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 846260e..3e6f588 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -19,6 +19,9 @@
 #include <media/tveeprom.h>
 #include <media/media-entity.h>
 
+/* Max transfer size done by I2C transfer functions */
+#define MXL_MAX_XFER_SIZE  64
+
 #define MXL_EP1_REG_READ     1
 #define MXL_EP2_REG_WRITE    2
 #define MXL_EP3_INTERRUPT    3
@@ -86,6 +89,9 @@
 	struct mutex fe_lock;
 	u8 num_frontends;
 	struct mxl111sf_adap_state adap_state[3];
+	u8 sndbuf[MXL_MAX_XFER_SIZE];
+	u8 rcvbuf[MXL_MAX_XFER_SIZE];
+	struct mutex msg_lock;
 #ifdef CONFIG_MEDIA_CONTROLLER_DVB
 	struct media_entity tuner;
 	struct media_pad tuner_pads[2];
@@ -108,7 +114,7 @@
 
 /* needed for hardware i2c functions in mxl111sf-i2c.c:
  * mxl111sf_i2c_send_data / mxl111sf_i2c_get_data */
-int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+int mxl111sf_ctrl_msg(struct mxl111sf_state *state,
 		      u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen);
 
 #define mxl_printk(kern, fmt, arg...) \
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 65fed71..cc91f7b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -375,6 +375,7 @@
 			 struct device *dev)
 {
 	struct enclosure_component *cdev;
+	int err;
 
 	if (!edev || component >= edev->components)
 		return -EINVAL;
@@ -384,12 +385,17 @@
 	if (cdev->dev == dev)
 		return -EEXIST;
 
-	if (cdev->dev)
+	if (cdev->dev) {
 		enclosure_remove_links(cdev);
-
-	put_device(cdev->dev);
+		put_device(cdev->dev);
+	}
 	cdev->dev = get_device(dev);
-	return enclosure_add_links(cdev);
+	err = enclosure_add_links(cdev);
+	if (err) {
+		put_device(cdev->dev);
+		cdev->dev = NULL;
+	}
+	return err;
 }
 EXPORT_SYMBOL_GPL(enclosure_add_device);
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c6f3496..120fd54 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -4589,6 +4589,10 @@
 
 	dev_set_drvdata(&card->dev, md);
 
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
 	if (mmc_add_disk(md))
 		goto out;
 
@@ -4632,6 +4636,9 @@
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	mmc_set_bus_resume_policy(card->host, 0);
+#endif
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0bf89b4..978dd9a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -4472,7 +4472,7 @@
 
 	BUG_ON(host->card);
 
-	mmc_register_extcon(host);
+	mmc_unregister_extcon(host);
 
 	mmc_claim_host(host);
 	mmc_power_off(host);
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
index acc4ae0..d624b48 100644
--- a/drivers/mmc/host/sdhci-msm-ice.c
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -212,6 +212,38 @@
 }
 
 static
+int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req,
+			unsigned int *bypass, short *key_index)
+{
+	int err = 0;
+	struct ice_data_setting ice_set;
+
+	memset(&ice_set, 0, sizeof(struct ice_data_setting));
+	if (msm_host->ice.vops->config_start) {
+		err = msm_host->ice.vops->config_start(
+						msm_host->ice.pdev,
+						req, &ice_set, false);
+		if (err) {
+			pr_err("%s: ice config failed %d\n",
+					mmc_hostname(msm_host->mmc), err);
+			return err;
+		}
+	}
+	/* if writing data command */
+	if (rq_data_dir(req) == WRITE)
+		*bypass = ice_set.encr_bypass ?
+				SDHCI_MSM_ICE_ENABLE_BYPASS :
+				SDHCI_MSM_ICE_DISABLE_BYPASS;
+	/* if reading data command */
+	else if (rq_data_dir(req) == READ)
+		*bypass = ice_set.decr_bypass ?
+				SDHCI_MSM_ICE_ENABLE_BYPASS :
+				SDHCI_MSM_ICE_DISABLE_BYPASS;
+	*key_index = ice_set.crypto_data.key_index;
+	return err;
+}
+
+static
 void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba,
 			u32 slot, unsigned int bypass, short key_index)
 {
@@ -250,7 +282,7 @@
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 	int err = 0;
-	struct ice_data_setting ice_set;
+	short key_index = 0;
 	sector_t lba = 0;
 	unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
 	struct request *req;
@@ -261,41 +293,22 @@
 		return -EINVAL;
 	}
 
-	BUG_ON(!mrq);
-	memset(&ice_set, 0, sizeof(struct ice_data_setting));
+	WARN_ON(!mrq);
+	if (!mrq)
+		return -EINVAL;
 	req = mrq->req;
 	if (req) {
 		lba = req->__sector;
-		if (msm_host->ice.vops->config_start) {
-			err = msm_host->ice.vops->config_start(
-							msm_host->ice.pdev,
-							req, &ice_set, false);
-			if (err) {
-				pr_err("%s: ice config failed %d\n",
-						mmc_hostname(host->mmc), err);
-				return err;
-			}
-		}
-		/* if writing data command */
-		if (rq_data_dir(req) == WRITE)
-			bypass = ice_set.encr_bypass ?
-					SDHCI_MSM_ICE_ENABLE_BYPASS :
-					SDHCI_MSM_ICE_DISABLE_BYPASS;
-		/* if reading data command */
-		else if (rq_data_dir(req) == READ)
-			bypass = ice_set.decr_bypass ?
-					SDHCI_MSM_ICE_ENABLE_BYPASS :
-					SDHCI_MSM_ICE_DISABLE_BYPASS;
-		pr_debug("%s: %s: slot %d encr_bypass %d bypass %d decr_bypass %d key_index %d\n",
+		err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+		if (err)
+			return err;
+		pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
 				mmc_hostname(host->mmc),
 				(rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
-				slot, ice_set.encr_bypass, bypass,
-				ice_set.decr_bypass,
-				ice_set.crypto_data.key_index);
+				slot, bypass, key_index);
 	}
 
-	sdhci_msm_ice_update_cfg(host, lba, slot, bypass,
-				ice_set.crypto_data.key_index);
+	sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
 	return 0;
 }
 
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index b6c17ec0..4b45ea5 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -691,7 +691,7 @@
 		mclk_freq = 5;
 	else if (host->clock <= 187000000)
 		mclk_freq = 6;
-	else if (host->clock <= 200000000)
+	else if (host->clock <= 208000000)
 		mclk_freq = 7;
 
 	writel_relaxed(((readl_relaxed(host->ioaddr +
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 380a641..258bc8d 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -230,7 +230,7 @@
 	/* Wait for 100ms as Octeon resets. */
 	mdelay(100);
 
-	if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) {
+	if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
 		dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
 			oct->octeon_id);
 		return 1;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index e779af8..cda32d5 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -48,7 +48,7 @@
 	/* Wait for 10ms as Octeon resets. */
 	mdelay(100);
 
-	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
+	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
 		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
 		return 1;
 	}
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a61447f..1264a36 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -246,6 +246,7 @@
 			E1000_STATUS_FUNC_SHIFT;
 
 	/* Set phy->phy_addr and phy->id. */
+	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
 	ret_val = igb_get_phy_id_82575(hw);
 	if (ret_val)
 		return ret_val;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ea58234..9d37229 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2671,8 +2671,6 @@
 		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
 	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
 	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
-	stats->tx_carrier_errors =
-		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
 	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
 			   stats->rx_frame_errors;
 	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2115c8a..8beecd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -67,6 +67,7 @@
 
 enum {
 	MLX5_DROP_NEW_HEALTH_WORK,
+	MLX5_DROP_NEW_RECOVERY_WORK,
 };
 
 static u8 get_nic_state(struct mlx5_core_dev *dev)
@@ -193,7 +194,7 @@
 	mlx5_handle_bad_state(dev);
 
 	spin_lock(&health->wq_lock);
-	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
 		schedule_delayed_work(&health->recover_work, recover_delay);
 	else
 		dev_err(&dev->pdev->dev,
@@ -328,6 +329,7 @@
 	init_timer(&health->timer);
 	health->sick = 0;
 	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+	clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
 	health->health = &dev->iseg->health;
 	health->health_counter = &dev->iseg->health_counter;
 
@@ -350,11 +352,22 @@
 
 	spin_lock(&health->wq_lock);
 	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
 	spin_unlock(&health->wq_lock);
 	cancel_delayed_work_sync(&health->recover_work);
 	cancel_work_sync(&health->work);
 }
 
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	spin_lock(&health->wq_lock);
+	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+	spin_unlock(&health->wq_lock);
+	cancel_delayed_work_sync(&dev->priv.health.recover_work);
+}
+
 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5bea0bf..b3309f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1169,7 +1169,7 @@
 	int err = 0;
 
 	if (cleanup)
-		mlx5_drain_health_wq(dev);
+		mlx5_drain_health_recovery(dev);
 
 	mutex_lock(&dev->intf_state_mutex);
 	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 4ca4613..b1af7cd 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1505,8 +1505,8 @@
 		*index = entry->index;
 		resolved = false;
 	} else if (removing) {
-		ofdpa_neigh_del(trans, found);
 		*index = found->index;
+		ofdpa_neigh_del(trans, found);
 	} else if (updating) {
 		ofdpa_neigh_update(found, trans, NULL, false);
 		resolved = !is_zero_ether_addr(found->eth_dst);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 00279da..c4ada72 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4399,12 +4399,9 @@
 	struct efx_ef10_filter_table *table = efx->filter_state;
 	struct net_device *net_dev = efx->net_dev;
 	struct netdev_hw_addr *uc;
-	int addr_count;
 	unsigned int i;
 
-	addr_count = netdev_uc_count(net_dev);
 	table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
-	table->dev_uc_count = 1 + addr_count;
 	ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
 	i = 1;
 	netdev_for_each_uc_addr(uc, net_dev) {
@@ -4415,6 +4412,8 @@
 		ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
 		i++;
 	}
+
+	table->dev_uc_count = i;
 }
 
 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
@@ -4422,11 +4421,10 @@
 	struct efx_ef10_filter_table *table = efx->filter_state;
 	struct net_device *net_dev = efx->net_dev;
 	struct netdev_hw_addr *mc;
-	unsigned int i, addr_count;
+	unsigned int i;
 
 	table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
 
-	addr_count = netdev_mc_count(net_dev);
 	i = 0;
 	netdev_for_each_mc_addr(mc, net_dev) {
 		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 4865221..b88f7d6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -908,7 +908,7 @@
 	if (overflow) {
 		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
 		while (skb) {
-			skb_complete_tx_timestamp(skb, NULL);
+			kfree_skb(skb);
 			skb = skb_dequeue(&dp83640->tx_queue);
 		}
 		return;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fab56c9..2229188 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -622,6 +622,8 @@
 	if ((regval & 0xFF) == 0xFF) {
 		phy_init_hw(phydev);
 		phydev->link = 0;
+		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+			phydev->drv->config_intr(phydev);
 	}
 
 	return 0;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 642df93..578bd50 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -787,15 +787,10 @@
 static void vrf_dev_uninit(struct net_device *dev)
 {
 	struct net_vrf *vrf = netdev_priv(dev);
-	struct net_device *port_dev;
-	struct list_head *iter;
 
 	vrf_rtable_release(dev, vrf);
 	vrf_rt6_release(dev, vrf);
 
-	netdev_for_each_lower_dev(dev, port_dev, iter)
-		vrf_del_slave(dev, port_dev);
-
 	free_percpu(dev->dstats);
 	dev->dstats = NULL;
 }
@@ -1232,6 +1227,12 @@
 
 static void vrf_dellink(struct net_device *dev, struct list_head *head)
 {
+	struct net_device *port_dev;
+	struct list_head *iter;
+
+	netdev_for_each_lower_dev(dev, port_dev, iter)
+		vrf_del_slave(dev, port_dev);
+
 	unregister_netdevice_queue(dev, head);
 }
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 963e533..983e941 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -227,15 +227,15 @@
 
 static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
 {
-	struct vxlan_dev *vxlan;
+	struct vxlan_dev_node *node;
 
 	/* For flow based devices, map all packets to VNI 0 */
 	if (vs->flags & VXLAN_F_COLLECT_METADATA)
 		vni = 0;
 
-	hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
-		if (vxlan->default_dst.remote_vni == vni)
-			return vxlan;
+	hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
+		if (node->vxlan->default_dst.remote_vni == vni)
+			return node->vxlan;
 	}
 
 	return NULL;
@@ -2309,17 +2309,22 @@
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
 	spin_lock(&vn->sock_lock);
-	hlist_del_init_rcu(&vxlan->hlist);
+	hlist_del_init_rcu(&vxlan->hlist4.hlist);
+#if IS_ENABLED(CONFIG_IPV6)
+	hlist_del_init_rcu(&vxlan->hlist6.hlist);
+#endif
 	spin_unlock(&vn->sock_lock);
 }
 
-static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
+			     struct vxlan_dev_node *node)
 {
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	__be32 vni = vxlan->default_dst.remote_vni;
 
+	node->vxlan = vxlan;
 	spin_lock(&vn->sock_lock);
-	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+	hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
 	spin_unlock(&vn->sock_lock);
 }
 
@@ -2778,6 +2783,7 @@
 {
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 	struct vxlan_sock *vs = NULL;
+	struct vxlan_dev_node *node;
 
 	if (!vxlan->cfg.no_share) {
 		spin_lock(&vn->sock_lock);
@@ -2795,12 +2801,16 @@
 	if (IS_ERR(vs))
 		return PTR_ERR(vs);
 #if IS_ENABLED(CONFIG_IPV6)
-	if (ipv6)
+	if (ipv6) {
 		rcu_assign_pointer(vxlan->vn6_sock, vs);
-	else
+		node = &vxlan->hlist6;
+	} else
 #endif
+	{
 		rcu_assign_pointer(vxlan->vn4_sock, vs);
-	vxlan_vs_add_dev(vs, vxlan);
+		node = &vxlan->hlist4;
+	}
+	vxlan_vs_add_dev(vs, vxlan, node);
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index ae304355..fe5102c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1821,8 +1821,6 @@
 static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
 {
 	REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
-	REG_SET_BIT(ah, 0x9864, 0x7f000);
-	REG_SET_BIT(ah, 0x9924, 0x7f00fe);
 	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
 	REG_WRITE(ah, AR_CR, AR_CR_RXD);
 	REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index d38e50f..e0374eb 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -120,6 +120,8 @@
 
 void ath9k_rng_stop(struct ath_softc *sc)
 {
-	if (sc->rng_task)
+	if (sc->rng_task) {
 		kthread_stop(sc->rng_task);
+		sc->rng_task = NULL;
+	}
 }
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 16aca9e..1fa7f84 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -189,22 +189,27 @@
 	if (strtobool(buf, &start))
 		return -EINVAL;
 
+	mutex_lock(&sc->mutex);
+
 	if (start == sc->tx99_state) {
 		if (!start)
-			return count;
+			goto out;
 		ath_dbg(common, XMIT, "Resetting TX99\n");
 		ath9k_tx99_deinit(sc);
 	}
 
 	if (!start) {
 		ath9k_tx99_deinit(sc);
-		return count;
+		goto out;
 	}
 
 	r = ath9k_tx99_init(sc);
-	if (r)
+	if (r) {
+		mutex_unlock(&sc->mutex);
 		return r;
-
+	}
+out:
+	mutex_unlock(&sc->mutex);
 	return count;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ae0952f..3028f18 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -592,7 +592,8 @@
 			cfg80211_connect_bss(ndev, evt->bssid, wil->bss,
 					     assoc_req_ie, assoc_req_ielen,
 					     assoc_resp_ie, assoc_resp_ielen,
-					     WLAN_STATUS_SUCCESS, GFP_KERNEL);
+					     WLAN_STATUS_SUCCESS, GFP_KERNEL,
+					     NL80211_TIMEOUT_UNSPECIFIED);
 		}
 		wil->bss = NULL;
 	} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 72139b5..746f8c9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -705,7 +705,7 @@
 int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
 			   struct sk_buff_head *pktq, uint totlen)
 {
-	struct sk_buff *glom_skb;
+	struct sk_buff *glom_skb = NULL;
 	struct sk_buff *skb;
 	u32 addr = sdiodev->sbwad;
 	int err = 0;
@@ -726,10 +726,8 @@
 			return -ENOMEM;
 		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
 					 glom_skb);
-		if (err) {
-			brcmu_pkt_buf_free_skb(glom_skb);
+		if (err)
 			goto done;
-		}
 
 		skb_queue_walk(pktq, skb) {
 			memcpy(skb->data, glom_skb->data, skb->len);
@@ -740,6 +738,7 @@
 					    pktq);
 
 done:
+	brcmu_pkt_buf_free_skb(glom_skb);
 	return err;
 }
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 01d44f9..b85398c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4930,6 +4930,11 @@
 		cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
 					GFP_KERNEL);
 	} else if (ieee80211_is_action(mgmt->frame_control)) {
+		if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
+			brcmf_err("invalid action frame length\n");
+			err = -EINVAL;
+			goto exit;
+		}
 		af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
 		if (af_params == NULL) {
 			brcmf_err("unable to allocate frame\n");
@@ -6873,7 +6878,7 @@
 	wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
 	if (!wiphy) {
 		brcmf_err("Could not allocate wiphy device\n");
-		return NULL;
+		goto ops_out;
 	}
 	memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN);
 	set_wiphy_dev(wiphy, busdev);
@@ -7007,6 +7012,7 @@
 	ifp->vif = NULL;
 wiphy_out:
 	brcmf_free_wiphy(wiphy);
+ops_out:
 	kfree(ops);
 	return NULL;
 }
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index f949ad2b..fa3547e 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -70,10 +70,10 @@
 #define WSPI_MAX_CHUNK_SIZE    4092
 
 /*
- * wl18xx driver aggregation buffer size is (13 * PAGE_SIZE) compared to
- * (4 * PAGE_SIZE) for wl12xx, so use the larger buffer needed for wl18xx
+ * wl18xx driver aggregation buffer size is (13 * 4K) compared to
+ * (4 * 4K) for wl12xx, so use the larger buffer needed for wl18xx
  */
-#define SPI_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
+#define SPI_AGGR_BUFFER_SIZE (13 * SZ_4K)
 
 /* Maximum number of SPI write chunks */
 #define WSPI_MAX_NUM_OF_CHUNKS \
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 599cf50..cd442e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -281,6 +281,7 @@
 {
 	RING_IDX req_prod = queue->rx.req_prod_pvt;
 	int notify;
+	int err = 0;
 
 	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 		return;
@@ -295,8 +296,10 @@
 		struct xen_netif_rx_request *req;
 
 		skb = xennet_alloc_one_rx_buffer(queue);
-		if (!skb)
+		if (!skb) {
+			err = -ENOMEM;
 			break;
+		}
 
 		id = xennet_rxidx(req_prod);
 
@@ -320,8 +323,13 @@
 
 	queue->rx.req_prod_pvt = req_prod;
 
-	/* Not enough requests? Try again later. */
-	if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
+	/* Try again later if there are not enough requests or skb allocation
+	 * failed.
+	 * Enough requests is quantified as the sum of newly created slots and
+	 * the unconsumed slots at the backend.
+	 */
+	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+	    unlikely(err)) {
 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 		return;
 	}
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index f8dcdf4..af62c4c 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -459,7 +459,7 @@
 
 	INIT_WORK(&priv->fw_dnld.rx_work, fw_dnld_rx_work);
 	snprintf(name, sizeof(name), "%s_nfcmrvl_fw_dnld_rx_wq",
-		 dev_name(priv->dev));
+		 dev_name(&priv->ndev->nfc_dev->dev));
 	priv->fw_dnld.rx_wq = create_singlethread_workqueue(name);
 	if (!priv->fw_dnld.rx_wq)
 		return -ENOMEM;
@@ -496,6 +496,7 @@
 {
 	struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
 	struct nfcmrvl_fw_dnld *fw_dnld = &priv->fw_dnld;
+	int res;
 
 	if (!priv->support_fw_dnld)
 		return -ENOTSUPP;
@@ -511,7 +512,9 @@
 	 */
 
 	/* Retrieve FW binary */
-	if (request_firmware(&fw_dnld->fw, firmware_name, priv->dev) < 0) {
+	res = request_firmware(&fw_dnld->fw, firmware_name,
+			       &ndev->nfc_dev->dev);
+	if (res < 0) {
 		nfc_err(priv->dev, "failed to retrieve FW %s", firmware_name);
 		return -ENOENT;
 	}
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 51c8240..a446590 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -124,12 +124,13 @@
 	memcpy(&priv->config, pdata, sizeof(*pdata));
 
 	if (priv->config.reset_n_io) {
-		rc = devm_gpio_request_one(dev,
-					   priv->config.reset_n_io,
-					   GPIOF_OUT_INIT_LOW,
-					   "nfcmrvl_reset_n");
-		if (rc < 0)
+		rc = gpio_request_one(priv->config.reset_n_io,
+				      GPIOF_OUT_INIT_LOW,
+				      "nfcmrvl_reset_n");
+		if (rc < 0) {
+			priv->config.reset_n_io = 0;
 			nfc_err(dev, "failed to request reset_n io\n");
+		}
 	}
 
 	if (phy == NFCMRVL_PHY_SPI) {
@@ -154,32 +155,36 @@
 	if (!priv->ndev) {
 		nfc_err(dev, "nci_allocate_device failed\n");
 		rc = -ENOMEM;
-		goto error;
+		goto error_free_gpio;
 	}
 
-	nci_set_drvdata(priv->ndev, priv);
-
-	rc = nci_register_device(priv->ndev);
-	if (rc) {
-		nfc_err(dev, "nci_register_device failed %d\n", rc);
-		goto error_free_dev;
-	}
-
-	/* Ensure that controller is powered off */
-	nfcmrvl_chip_halt(priv);
-
 	rc = nfcmrvl_fw_dnld_init(priv);
 	if (rc) {
 		nfc_err(dev, "failed to initialize FW download %d\n", rc);
 		goto error_free_dev;
 	}
 
+	nci_set_drvdata(priv->ndev, priv);
+
+	rc = nci_register_device(priv->ndev);
+	if (rc) {
+		nfc_err(dev, "nci_register_device failed %d\n", rc);
+		goto error_fw_dnld_deinit;
+	}
+
+	/* Ensure that controller is powered off */
+	nfcmrvl_chip_halt(priv);
+
 	nfc_info(dev, "registered with nci successfully\n");
 	return priv;
 
+error_fw_dnld_deinit:
+	nfcmrvl_fw_dnld_deinit(priv);
 error_free_dev:
 	nci_free_device(priv->ndev);
-error:
+error_free_gpio:
+	if (priv->config.reset_n_io)
+		gpio_free(priv->config.reset_n_io);
 	kfree(priv);
 	return ERR_PTR(rc);
 }
@@ -195,7 +200,7 @@
 	nfcmrvl_fw_dnld_deinit(priv);
 
 	if (priv->config.reset_n_io)
-		devm_gpio_free(priv->dev, priv->config.reset_n_io);
+		gpio_free(priv->config.reset_n_io);
 
 	nci_unregister_device(ndev);
 	nci_free_device(ndev);
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 83a99e3..6c0c301 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -109,6 +109,7 @@
 	struct nfcmrvl_private *priv;
 	struct nfcmrvl_platform_data *pdata = NULL;
 	struct nfcmrvl_platform_data config;
+	struct device *dev = nu->tty->dev;
 
 	/*
 	 * Platform data cannot be used here since usually it is already used
@@ -116,9 +117,8 @@
 	 * and check if DT entries were added.
 	 */
 
-	if (nu->tty->dev->parent && nu->tty->dev->parent->of_node)
-		if (nfcmrvl_uart_parse_dt(nu->tty->dev->parent->of_node,
-					  &config) == 0)
+	if (dev && dev->parent && dev->parent->of_node)
+		if (nfcmrvl_uart_parse_dt(dev->parent->of_node, &config) == 0)
 			pdata = &config;
 
 	if (!pdata) {
@@ -131,7 +131,7 @@
 	}
 
 	priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_UART, nu, &uart_ops,
-					nu->tty->dev, pdata);
+					dev, pdata);
 	if (IS_ERR(priv))
 		return PTR_ERR(priv);
 
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 368795a..94733f7 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1203,10 +1203,13 @@
 		struct page *page, bool is_write)
 {
 	struct btt *btt = bdev->bd_disk->private_data;
+	int rc;
 
-	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
-	page_endio(page, is_write, 0);
-	return 0;
+	rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+	if (rc == 0)
+		page_endio(page, is_write, 0);
+
+	return rc;
 }
 
 
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7ceba08..18a0bea 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -450,14 +450,15 @@
 static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
 {
 	const unsigned int sector_size = 512;
-	sector_t start_sector;
+	sector_t start_sector, end_sector;
 	u64 num_sectors;
 	u32 rem;
 
 	start_sector = div_u64(ns_offset, sector_size);
-	num_sectors = div_u64_rem(len, sector_size, &rem);
+	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
 	if (rem)
-		num_sectors++;
+		end_sector++;
+	num_sectors = end_sector - start_sector;
 
 	if (unlikely(num_sectors > (u64)INT_MAX)) {
 		u64 remaining = num_sectors;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3222f3e..286fda4 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -88,7 +88,7 @@
 
 struct nvme_rdma_queue {
 	struct nvme_rdma_qe	*rsp_ring;
-	u8			sig_count;
+	atomic_t		sig_count;
 	int			queue_size;
 	size_t			cmnd_capsule_len;
 	struct nvme_rdma_ctrl	*ctrl;
@@ -555,6 +555,7 @@
 		queue->cmnd_capsule_len = sizeof(struct nvme_command);
 
 	queue->queue_size = queue_size;
+	atomic_set(&queue->sig_count, 0);
 
 	queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
 			RDMA_PS_TCP, IB_QPT_RC);
@@ -1011,17 +1012,16 @@
 		nvme_rdma_wr_error(cq, wc, "SEND");
 }
 
-static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
+/*
+ * We want to signal completion at least every queue depth/2.  This returns the
+ * largest power of two that is not above half of (queue size + 1) to optimize
+ * (avoid divisions).
+ */
+static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
 {
-	int sig_limit;
+	int limit = 1 << ilog2((queue->queue_size + 1) / 2);
 
-	/*
-	 * We signal completion every queue depth/2 and also handle the
-	 * degenerated case of a  device with queue_depth=1, where we
-	 * would need to signal every message.
-	 */
-	sig_limit = max(queue->queue_size / 2, 1);
-	return (++queue->sig_count % sig_limit) == 0;
+	return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
 }
 
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 965911d..1b4d93e 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -488,21 +488,24 @@
 
 	rval = device_add(&nvmem->dev);
 	if (rval)
-		goto out;
+		goto err_put_device;
 
 	if (config->compat) {
 		rval = nvmem_setup_compat(nvmem, config);
 		if (rval)
-			goto out;
+			goto err_device_del;
 	}
 
 	if (config->cells)
 		nvmem_add_cells(nvmem, config);
 
 	return nvmem;
-out:
-	ida_simple_remove(&nvmem_ida, nvmem->id);
-	kfree(nvmem);
+
+err_device_del:
+	device_del(&nvmem->dev);
+err_put_device:
+	put_device(&nvmem->dev);
+
 	return ERR_PTR(rval);
 }
 EXPORT_SYMBOL_GPL(nvmem_register);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index fd5cfad..f7a9701 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -225,6 +225,7 @@
 
 	return tsize;
 }
+EXPORT_SYMBOL_GPL(of_device_get_modalias);
 
 /**
  * of_device_uevent - Display OF related uevent information
@@ -287,3 +288,4 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 3ed6238..c4953ec 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -741,6 +741,8 @@
 
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return DMA_ERROR_CODE;
 
 	BUG_ON(size <= 0);
 
@@ -814,6 +816,10 @@
 	
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 
 	DBG_RUN("%s() iovp 0x%lx/%x\n",
 		__func__, (long)iova, size);
@@ -918,6 +924,8 @@
 	
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 	
 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
@@ -990,6 +998,10 @@
 
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
 		__func__, nents, sg_virt(sglist), sglist->length);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 1133b5c..5c63b92 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -154,7 +154,10 @@
 };
 
 /* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ((struct dino_device *) d)
+#define DINO_DEV(d) ({				\
+	void *__pdata = d;			\
+	BUG_ON(!__pdata);			\
+	(struct dino_device *)__pdata; })
 
 
 /*
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 2ec2aef..bc286cb 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -111,8 +111,10 @@
 
 
 /* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ((struct lba_device *) (d))
-
+#define LBA_DEV(d) ({				\
+	void *__pdata = d;			\
+	BUG_ON(!__pdata);			\
+	(struct lba_device *)__pdata; })
 
 /*
 ** Only allow 8 subsidiary busses per LBA
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 151b86b..56918d1 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -691,6 +691,8 @@
 		return 0;
 
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 
 	/*
 	 * check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@
 	int pide;
 
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return DMA_ERROR_CODE;
 
 	/* save offset bits */
 	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -813,6 +817,10 @@
 	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 	offset = iova & ~IOVP_MASK;
 	iova ^= offset;        /* clear offset bits */
 	size += offset;
@@ -952,6 +960,8 @@
 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
@@ -1037,6 +1047,10 @@
 		__func__, nents, sg_virt(sglist), sglist->length);
 
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 
 #ifdef SBA_COLLECT_STATS
 	ioc->usg_calls++;
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 3452983..03ebfd5 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -131,6 +131,7 @@
 		 PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
 		 PCIE_CORE_INT_MMVC)
 
+#define PCIE_RC_CONFIG_NORMAL_BASE	0x800000
 #define PCIE_RC_CONFIG_BASE		0xa00000
 #define PCIE_RC_CONFIG_VENDOR		(PCIE_RC_CONFIG_BASE + 0x00)
 #define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
@@ -267,7 +268,9 @@
 static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
 				     int where, int size, u32 *val)
 {
-	void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where;
+	void __iomem *addr;
+
+	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
 
 	if (!IS_ALIGNED((uintptr_t)addr, size)) {
 		*val = 0;
@@ -291,11 +294,13 @@
 				     int where, int size, u32 val)
 {
 	u32 mask, tmp, offset;
+	void __iomem *addr;
 
 	offset = where & ~0x3;
+	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
 
 	if (size == 4) {
-		writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+		writel(val, addr);
 		return PCIBIOS_SUCCESSFUL;
 	}
 
@@ -306,9 +311,9 @@
 	 * corrupt RW1C bits in adjacent registers.  But the hardware
 	 * doesn't support smaller writes.
 	 */
-	tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask;
+	tmp = readl(addr) & mask;
 	tmp |= val << ((where & 0x3) * 8);
-	writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+	writel(tmp, addr);
 
 	return PCIBIOS_SUCCESSFUL;
 }
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1ccce1c..8a68e2b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -954,6 +954,7 @@
 		return pci_legacy_resume_early(dev);
 
 	pci_update_current_state(pci_dev, PCI_D0);
+	pci_restore_state(pci_dev);
 
 	if (drv && drv->pm && drv->pm->thaw_noirq)
 		error = drv->pm->thaw_noirq(dev);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 9520166..db15141 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -951,7 +951,7 @@
 	ret = gpiochip_irqchip_add(chip,
 				   &msm_gpio_irq_chip,
 				   0,
-				   handle_edge_irq,
+				   handle_fasteoi_irq,
 				   IRQ_TYPE_NONE);
 	if (ret) {
 		dev_err(pctrl->dev, "Failed to add irqchip to gpiochip\n");
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 9f417bb..4a9232e 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2582,6 +2582,21 @@
 EXPORT_SYMBOL(ipa_stop_gsi_channel);
 
 /**
+ * ipa_start_gsi_channel() - Starts a GSI channel in IPA
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa_start_gsi_channel(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_start_gsi_channel, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_start_gsi_channel);
+
+/**
  * ipa_get_version_string() - Get string representation of IPA version
  * @ver: IPA version
  *
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 133e058..20471eb 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -324,6 +324,8 @@
 
 	int (*ipa_stop_gsi_channel)(u32 clnt_hdl);
 
+	int (*ipa_start_gsi_channel)(u32 clnt_hdl);
+
 	struct iommu_domain *(*ipa_get_smmu_domain)(void);
 
 	int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size,
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index 79da63e..a623d0b 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -114,6 +114,7 @@
  * @send_dl_skb: client callback for sending skb in downlink direction
  * @stats: statistics, how many packets were transmitted using the SW bridge
  * @is_conencted: is bridge connected ?
+ * @is_suspended: is bridge suspended ?
  * @mode: ODU mode (router/bridge)
  * @lock: for the initialization, connect and disconnect synchronization
  * @llv6_addr: link local IPv6 address of ODU network interface
@@ -122,6 +123,8 @@
  * @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe
  * @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe
  * @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe
+ * @rm_comp: completion object for IPA RM
+ * @wakeup_request: client callback to wakeup
  */
 struct odu_bridge_ctx {
 	struct class *class;
@@ -135,6 +138,7 @@
 	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
 	struct stats stats;
 	bool is_connected;
+	bool is_suspended;
 	enum odu_bridge_mode mode;
 	struct mutex lock;
 	struct in6_addr llv6_addr;
@@ -146,6 +150,8 @@
 	u32 ipa_sys_desc_size;
 	void *logbuf;
 	void *logbuf_low;
+	struct completion rm_comp;
+	void (*wakeup_request)(void *);
 };
 static struct odu_bridge_ctx *odu_bridge_ctx;
 
@@ -1246,6 +1252,288 @@
 }
 EXPORT_SYMBOL(odu_bridge_cleanup);
 
+/* IPA Bridge implementation */
+#ifdef CONFIG_IPA3
+
+static void ipa_br_rm_notify(void *user_data, enum ipa_rm_event event,
+	unsigned long data)
+{
+	if (event == IPA_RM_RESOURCE_GRANTED)
+		complete(&odu_bridge_ctx->rm_comp);
+}
+
+static int ipa_br_request_prod(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	reinit_completion(&odu_bridge_ctx->rm_comp);
+	ODU_BRIDGE_DBG("requesting odu prod\n");
+	res = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (res) {
+		if (res != -EINPROGRESS) {
+			ODU_BRIDGE_ERR("failed to request prod %d\n", res);
+			return res;
+		}
+		wait_for_completion(&odu_bridge_ctx->rm_comp);
+	}
+
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+
+}
+
+static int ipa_br_release_prod(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	reinit_completion(&odu_bridge_ctx->rm_comp);
+	ODU_BRIDGE_DBG("requesting odu prod\n");
+	res = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (res) {
+		ODU_BRIDGE_ERR("failed to release prod %d\n", res);
+		return res;
+	}
+
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+
+}
+
+static int ipa_br_cons_request(void)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+	if (odu_bridge_ctx->is_suspended)
+		odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv);
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_br_cons_release(void)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+}
+
+/* IPA Bridge API is the new API that will replace the old odu_bridge API */
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+{
+	int ret;
+	struct ipa_rm_create_params create_params;
+
+	if (!params || !params->wakeup_request || !hdl) {
+		ODU_BRIDGE_ERR("NULL arg\n");
+		return -EINVAL;
+	}
+
+
+	ret = odu_bridge_init(&params->info);
+	if (ret)
+		return ret;
+
+	odu_bridge_ctx->wakeup_request = params->wakeup_request;
+
+	/* create IPA RM resources for power management */
+	init_completion(&odu_bridge_ctx->rm_comp);
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+	create_params.reg_params.user_data = odu_bridge_ctx;
+	create_params.reg_params.notify_cb = ipa_br_rm_notify;
+	create_params.floor_voltage = IPA_VOLTAGE_SVS;
+	ret = ipa_rm_create_resource(&create_params);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to create RM prod %d\n", ret);
+		goto fail_rm_prod;
+	}
+
+	ret = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to add ODU->APPS dependency %d\n", ret);
+		goto fail_add_dep;
+	}
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	create_params.request_resource = ipa_br_cons_request;
+	create_params.release_resource = ipa_br_cons_release;
+	create_params.floor_voltage = IPA_VOLTAGE_SVS;
+	ret = ipa_rm_create_resource(&create_params);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to create RM cons %d\n", ret);
+		goto fail_rm_cons;
+	}
+
+	/* handle is ignored for now */
+	*hdl = 0;
+
+	return 0;
+
+fail_rm_cons:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+fail_add_dep:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+fail_rm_prod:
+	odu_bridge_cleanup();
+	return ret;
+}
+EXPORT_SYMBOL(ipa_bridge_init);
+
+int ipa_bridge_connect(u32 hdl)
+{
+	int ret;
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("already connected\n");
+		return -EFAULT;
+	}
+
+	ret = ipa_br_request_prod();
+	if (ret)
+		return ret;
+
+	return odu_bridge_connect();
+}
+EXPORT_SYMBOL(ipa_bridge_connect);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+	struct ipa_rm_perf_profile profile = {0};
+	int ret;
+
+	profile.max_supported_bandwidth_mbps = bandwidth;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD, &profile);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to set perf profile to prod %d\n", ret);
+		return ret;
+	}
+
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_CONS, &profile);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to set perf profile to cons %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
+
+int ipa_bridge_disconnect(u32 hdl)
+{
+	int ret;
+
+	ret = odu_bridge_disconnect();
+	if (ret)
+		return ret;
+
+	ret = ipa_br_release_prod();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_disconnect);
+
+int ipa_bridge_suspend(u32 hdl)
+{
+	int ret;
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("bridge is  disconnected\n");
+		return -EFAULT;
+	}
+
+	if (odu_bridge_ctx->is_suspended) {
+		ODU_BRIDGE_ERR("bridge is already suspended\n");
+		return -EFAULT;
+	}
+
+	/* stop cons channel to prevent downlink data during suspend */
+	ret = ipa_stop_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to stop CONS channel %d\n", ret);
+		return ret;
+	}
+
+	ret = ipa_br_release_prod();
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to release prod %d\n", ret);
+		ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+		return ret;
+	}
+	odu_bridge_ctx->is_suspended = true;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_suspend);
+
+int ipa_bridge_resume(u32 hdl)
+{
+	int ret;
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("bridge is  disconnected\n");
+		return -EFAULT;
+	}
+
+	if (!odu_bridge_ctx->is_suspended) {
+		ODU_BRIDGE_ERR("bridge is not suspended\n");
+		return -EFAULT;
+	}
+
+	ret = ipa_br_request_prod();
+	if (ret)
+		return ret;
+
+	ret = ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
+	if (ret) {
+		ODU_BRIDGE_ERR("failed to start CONS channel %d\n", ret);
+		return ret;
+	}
+	odu_bridge_ctx->is_suspended = false;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_bridge_resume);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata)
+{
+	return odu_bridge_tx_dp(skb, metadata);
+}
+EXPORT_SYMBOL(ipa_bridge_tx_dp);
+
+int ipa_bridge_cleanup(u32 hdl)
+{
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+	return odu_bridge_cleanup();
+}
+EXPORT_SYMBOL(ipa_bridge_cleanup);
+
+#endif /* CONFIG_IPA3 */
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ODU bridge driver");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 07bca0c..32c8b25 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -384,5 +384,6 @@
 			      void *user_data);
 void ipa_ntn_uc_dereg_rdyCB(void);
 const char *ipa_get_version_string(enum ipa_hw_type ver);
+int ipa_start_gsi_channel(u32 clnt_hdl);
 
 #endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 53ab299..bfd0446 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -641,7 +641,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_nat_dma_cmd *)param)->entries,
 				pre_entry);
 			retval = -EFAULT;
@@ -688,7 +688,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
 				pre_entry);
 			retval = -EFAULT;
@@ -727,7 +727,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_del_hdr *)param)->num_hdls,
 				pre_entry);
 			retval = -EFAULT;
@@ -767,7 +767,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_rt_rule *)param)->
 				num_rules,
 				pre_entry);
@@ -807,7 +807,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_mdfy_rt_rule *)param)->
 				num_rules,
 				pre_entry);
@@ -847,7 +847,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
 				pre_entry);
 			retval = -EFAULT;
@@ -886,7 +886,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_flt_rule *)param)->
 				num_rules,
 				pre_entry);
@@ -926,7 +926,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_del_flt_rule *)param)->
 				num_hdls,
 				pre_entry);
@@ -966,7 +966,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_mdfy_flt_rule *)param)->
 				num_rules,
 				pre_entry);
@@ -1104,7 +1104,7 @@
 		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
 			param)->num_tx_props
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_query_intf_tx_props *)
 				param)->num_tx_props, pre_entry);
 			retval = -EFAULT;
@@ -1149,7 +1149,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
 			param)->num_rx_props != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_query_intf_rx_props *)
 				param)->num_rx_props, pre_entry);
 			retval = -EFAULT;
@@ -1194,7 +1194,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
 			param)->num_ext_props != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_query_intf_ext_props *)
 				param)->num_ext_props, pre_entry);
 			retval = -EFAULT;
@@ -1232,7 +1232,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_msg_meta *)param)->msg_len,
 				pre_entry);
 			retval = -EFAULT;
@@ -1372,7 +1372,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
 			param)->num_proc_ctxs != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_hdr_proc_ctx *)
 				param)->num_proc_ctxs, pre_entry);
 			retval = -EFAULT;
@@ -1411,7 +1411,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
 			param)->num_hdls != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
 				num_hdls,
 				pre_entry);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 8d9f0e0..f7b0864 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1015,25 +1015,25 @@
 	if (rule->action != IPA_PASS_TO_EXCEPTION) {
 		if (!rule->eq_attrib_type) {
 			if (!rule->rt_tbl_hdl) {
-				IPAERR("invalid RT tbl\n");
+				IPAERR_RL("invalid RT tbl\n");
 				goto error;
 			}
 
 			rt_tbl = ipa_id_find(rule->rt_tbl_hdl);
 			if (rt_tbl == NULL) {
-				IPAERR("RT tbl not found\n");
+				IPAERR_RL("RT tbl not found\n");
 				goto error;
 			}
 
 			if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
-				IPAERR("RT table cookie is invalid\n");
+				IPAERR_RL("RT table cookie is invalid\n");
 				goto error;
 			}
 		} else {
 			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
 				IPA_MEM_PART(v4_modem_rt_index_hi) :
 				IPA_MEM_PART(v6_modem_rt_index_hi))) {
-				IPAERR("invalid RT tbl\n");
+				IPAERR_RL("invalid RT tbl\n");
 				goto error;
 			}
 		}
@@ -1089,12 +1089,12 @@
 
 	entry = ipa_id_find(rule_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		return -EINVAL;
 	}
 
 	if (entry->cookie != IPA_FLT_COOKIE) {
-		IPAERR("bad params\n");
+		IPAERR_RL("bad params\n");
 		return -EINVAL;
 	}
 	id = entry->id;
@@ -1121,12 +1121,12 @@
 
 	entry = ipa_id_find(frule->rule_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		goto error;
 	}
 
 	if (entry->cookie != IPA_FLT_COOKIE) {
-		IPAERR("bad params\n");
+		IPAERR_RL("bad params\n");
 		goto error;
 	}
 
@@ -1136,25 +1136,25 @@
 	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
 		if (!frule->rule.eq_attrib_type) {
 			if (!frule->rule.rt_tbl_hdl) {
-				IPAERR("invalid RT tbl\n");
+				IPAERR_RL("invalid RT tbl\n");
 				goto error;
 			}
 
 			rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl);
 			if (rt_tbl == NULL) {
-				IPAERR("RT tbl not found\n");
+				IPAERR_RL("RT tbl not found\n");
 				goto error;
 			}
 
 			if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
-				IPAERR("RT table cookie is invalid\n");
+				IPAERR_RL("RT table cookie is invalid\n");
 				goto error;
 			}
 		} else {
 			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
 				IPA_MEM_PART(v4_modem_rt_index_hi) :
 				IPA_MEM_PART(v6_modem_rt_index_hi))) {
-				IPAERR("invalid RT tbl\n");
+				IPAERR_RL("invalid RT tbl\n");
 				goto error;
 			}
 		}
@@ -1178,7 +1178,7 @@
 	struct ipa_flt_tbl *tbl;
 
 	if (rule == NULL || rule_hdl == NULL) {
-		IPAERR("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
+		IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
 
 		return -EINVAL;
 	}
@@ -1197,14 +1197,14 @@
 	int ipa_ep_idx;
 
 	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
-		IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+		IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
 				rule_hdl, ep);
 
 		return -EINVAL;
 	}
 	ipa_ep_idx = ipa2_get_ep_mapping(ep);
 	if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
-		IPAERR("ep not valid ep=%d\n", ep);
+		IPAERR_RL("ep not valid ep=%d\n", ep);
 		return -EINVAL;
 	}
 	if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
@@ -1231,7 +1231,7 @@
 
 	if (rules == NULL || rules->num_rules == 0 ||
 			rules->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 
 		return -EINVAL;
 	}
@@ -1249,7 +1249,7 @@
 					rules->rules[i].at_rear,
 					&rules->rules[i].flt_rule_hdl);
 		if (result) {
-			IPAERR("failed to add flt rule %d\n", i);
+			IPAERR_RL("failed to add flt rule %d\n", i);
 			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
 		} else {
 			rules->rules[i].status = 0;
@@ -1282,14 +1282,14 @@
 	int result;
 
 	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa_ctx->lock);
 	for (i = 0; i < hdls->num_hdls; i++) {
 		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
-			IPAERR("failed to del rt rule %i\n", i);
+			IPAERR_RL("failed to del rt rule %i\n", i);
 			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
 		} else {
 			hdls->hdl[i].status = 0;
@@ -1322,14 +1322,14 @@
 	int result;
 
 	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa_ctx->lock);
 	for (i = 0; i < hdls->num_rules; i++) {
 		if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
-			IPAERR("failed to mdfy rt rule %i\n", i);
+			IPAERR_RL("failed to mdfy rt rule %i\n", i);
 			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
 		} else {
 			hdls->rules[i].status = 0;
@@ -1363,7 +1363,7 @@
 	int result;
 
 	if (ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1399,7 +1399,7 @@
 	int id;
 
 	if (ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index a6a7613..c8663c9 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -558,13 +558,13 @@
 		proc_ctx->type, proc_ctx->hdr_hdl);
 
 	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
-		IPAERR("invalid processing type %d\n", proc_ctx->type);
+		IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
 		return -EINVAL;
 	}
 
 	hdr_entry = ipa_id_find(proc_ctx->hdr_hdl);
 	if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) {
-		IPAERR("hdr_hdl is invalid\n");
+		IPAERR_RL("hdr_hdl is invalid\n");
 		return -EINVAL;
 	}
 
@@ -592,7 +592,7 @@
 			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
 		bin = IPA_HDR_PROC_CTX_BIN1;
 	} else {
-		IPAERR("unexpected needed len %d\n", needed_len);
+		IPAERR_RL("unexpected needed len %d\n", needed_len);
 		WARN_ON(1);
 		goto bad_len;
 	}
@@ -602,7 +602,7 @@
 		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
 	if (list_empty(&htbl->head_free_offset_list[bin])) {
 		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
-			IPAERR("hdr proc ctx table overflow\n");
+			IPAERR_RL("hdr proc ctx table overflow\n");
 			goto bad_len;
 		}
 
@@ -676,12 +676,12 @@
 	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		goto error;
 	}
 
 	if (!HDR_TYPE_IS_VALID(hdr->type)) {
-		IPAERR("invalid hdr type %d\n", hdr->type);
+		IPAERR_RL("invalid hdr type %d\n", hdr->type);
 		goto error;
 	}
 
@@ -713,7 +713,7 @@
 	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
 		bin = IPA_HDR_BIN4;
 	else {
-		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+		IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
 		goto bad_hdr_len;
 	}
 
@@ -844,7 +844,7 @@
 
 	entry = ipa_id_find(proc_ctx_hdl);
 	if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -852,7 +852,7 @@
 		htbl->proc_ctx_cnt, entry->offset_entry->offset);
 
 	if (by_user && entry->user_deleted) {
-		IPAERR("proc_ctx already deleted by user\n");
+		IPAERR_RL("proc_ctx already deleted by user\n");
 		return -EINVAL;
 	}
 
@@ -890,12 +890,12 @@
 
 	entry = ipa_id_find(hdr_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		return -EINVAL;
 	}
 
 	if (entry->cookie != IPA_HDR_COOKIE) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -907,7 +907,7 @@
 			htbl->hdr_cnt, entry->offset_entry->offset);
 
 	if (by_user && entry->user_deleted) {
-		IPAERR("hdr already deleted by user\n");
+		IPAERR_RL("hdr already deleted by user\n");
 		return -EINVAL;
 	}
 
@@ -956,12 +956,12 @@
 	int result = -EFAULT;
 
 	if (unlikely(!ipa_ctx)) {
-		IPAERR("IPA driver was not initialized\n");
+		IPAERR_RL("IPA driver was not initialized\n");
 		return -EINVAL;
 	}
 
 	if (hdrs == NULL || hdrs->num_hdrs == 0) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -970,7 +970,7 @@
 			hdrs->num_hdrs);
 	for (i = 0; i < hdrs->num_hdrs; i++) {
 		if (__ipa_add_hdr(&hdrs->hdr[i])) {
-			IPAERR("failed to add hdr %d\n", i);
+			IPAERR_RL("failed to add hdr %d\n", i);
 			hdrs->hdr[i].status = -1;
 		} else {
 			hdrs->hdr[i].status = 0;
@@ -1011,14 +1011,14 @@
 	}
 
 	if (hdls == NULL || hdls->num_hdls == 0) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa_ctx->lock);
 	for (i = 0; i < hdls->num_hdls; i++) {
 		if (__ipa_del_hdr(hdls->hdl[i].hdl, by_user)) {
-			IPAERR("failed to del hdr %i\n", i);
+			IPAERR_RL("failed to del hdr %i\n", i);
 			hdls->hdl[i].status = -1;
 		} else {
 			hdls->hdl[i].status = 0;
@@ -1067,13 +1067,13 @@
 
 	if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
 	    ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
-		IPAERR("Processing context not supported on IPA HW %d\n",
+		IPAERR_RL("Processing context not supported on IPA HW %d\n",
 			ipa_ctx->ipa_hw_type);
 		return -EFAULT;
 	}
 
 	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1082,7 +1082,7 @@
 			proc_ctxs->num_proc_ctxs);
 	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
 		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
-			IPAERR("failed to add hdr pric ctx %d\n", i);
+			IPAERR_RL("failed to add hdr proc ctx %d\n", i);
 			proc_ctxs->proc_ctx[i].status = -1;
 		} else {
 			proc_ctxs->proc_ctx[i].status = 0;
@@ -1127,14 +1127,14 @@
 	}
 
 	if (hdls == NULL || hdls->num_hdls == 0) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa_ctx->lock);
 	for (i = 0; i < hdls->num_hdls; i++) {
 		if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
-			IPAERR("failed to del hdr %i\n", i);
+			IPAERR_RL("failed to del hdr %i\n", i);
 			hdls->hdl[i].status = -1;
 		} else {
 			hdls->hdl[i].status = 0;
@@ -1371,7 +1371,7 @@
 	}
 
 	if (lookup == NULL) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 	mutex_lock(&ipa_ctx->lock);
@@ -1458,13 +1458,13 @@
 
 	entry = ipa_id_find(hdr_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		result = -EINVAL;
 		goto bail;
 	}
 
 	if (entry->cookie != IPA_HDR_COOKIE) {
-		IPAERR("invalid header entry\n");
+		IPAERR_RL("invalid header entry\n");
 		result = -EINVAL;
 		goto bail;
 	}
@@ -1493,7 +1493,7 @@
 	int result = -EFAULT;
 
 	if (copy == NULL) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 	mutex_lock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 0c2410f..141bff1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -68,6 +68,18 @@
 #define IPAERR(fmt, args...) \
 	pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
 
+#define IPAERR_RL(fmt, args...) \
+	do { \
+		pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__, \
+		__LINE__, ## args);\
+		if (ipa_ctx) { \
+			IPA_IPC_LOGGING(ipa_ctx->logbuf, DRV_NAME " %s:%d " fmt, \
+				__func__, __LINE__, ## args); \
+			IPA_IPC_LOGGING(ipa_ctx->logbuf_low, DRV_NAME \
+				" %s:%d " fmt, __func__, __LINE__, ## args); \
+		} \
+	} while (0)
+
 #define WLAN_AMPDU_TX_EP 15
 #define WLAN_PROD_TX_EP  19
 #define WLAN1_CONS_RX_EP  14
@@ -1730,9 +1742,6 @@
 	iowrite32(val, base + offset);
 }
 
-int ipa_bridge_init(void);
-void ipa_bridge_cleanup(void);
-
 ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
 		 loff_t *f_pos);
 int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index 12b1a99..e6954b7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -404,13 +404,13 @@
 
 	if (meta == NULL || (buff == NULL && callback != NULL) ||
 	    (buff != NULL && callback == NULL)) {
-		IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+		IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
 		       meta, buff, callback);
 		return -EINVAL;
 	}
 
 	if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
-		IPAERR("unsupported message type %d\n", meta->msg_type);
+		IPAERR_RL("unsupported message type %d\n", meta->msg_type);
 		return -EINVAL;
 	}
 
@@ -633,7 +633,7 @@
 	int result = -EINVAL;
 
 	if (meta == NULL || buff == NULL || !count) {
-		IPAERR("invalid param name=%p buff=%p count=%zu\n",
+		IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
 				meta, buff, count);
 		return result;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index a7f983e..50b2706 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -252,8 +252,8 @@
 
 	mutex_lock(&nat_ctx->lock);
 	if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
-		IPAERR("Nat device name mismatch\n");
-		IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+		IPAERR_RL("Nat device name mismatch\n");
+		IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
 		result = -EPERM;
 		goto bail;
 	}
@@ -272,7 +272,7 @@
 
 	if (mem->size <= 0 ||
 			nat_ctx->is_dev_init == true) {
-		IPAERR("Invalid Parameters or device is already init\n");
+		IPAERR_RL("Invalid Parameters or device is already init\n");
 		result = -EPERM;
 		goto bail;
 	}
@@ -335,8 +335,8 @@
 
 	/* check for integer overflow */
 	if (init->ipv4_rules_offset >
-		UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
-		IPAERR("Detected overflow\n");
+		(UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1)))) {
+		IPAERR_RL("Detected overflow\n");
 		return -EPERM;
 	}
 	/* Check Table Entry offset is not
@@ -345,8 +345,8 @@
 	tmp = init->ipv4_rules_offset +
 		(TBL_ENTRY_SIZE * (init->table_entries + 1));
 	if (tmp > ipa_ctx->nat_mem.size) {
-		IPAERR("Table rules offset not valid\n");
-		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+		IPAERR_RL("Table rules offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n",
 			init->ipv4_rules_offset, (init->table_entries + 1),
 			tmp, ipa_ctx->nat_mem.size);
 		return -EPERM;
@@ -355,7 +355,7 @@
 	/* check for integer overflow */
 	if (init->expn_rules_offset >
 		UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
-		IPAERR("Detected overflow\n");
+		IPAERR_RL("Detected overflow\n");
 		return -EPERM;
 	}
 	/* Check Expn Table Entry offset is not
@@ -364,8 +364,8 @@
 	tmp = init->expn_rules_offset +
 		(TBL_ENTRY_SIZE * init->expn_table_entries);
 	if (tmp > ipa_ctx->nat_mem.size) {
-		IPAERR("Expn Table rules offset not valid\n");
-		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+		IPAERR_RL("Expn Table rules offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n",
 			init->expn_rules_offset, init->expn_table_entries,
 			tmp, ipa_ctx->nat_mem.size);
 		return -EPERM;
@@ -374,7 +374,7 @@
 	/* check for integer overflow */
 	if (init->index_offset >
 		UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
-		IPAERR("Detected overflow\n");
+		IPAERR_RL("Detected overflow\n");
 		return -EPERM;
 	}
 	/* Check Indx Table Entry offset is not
@@ -383,8 +383,8 @@
 	tmp = init->index_offset +
 		(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
 	if (tmp > ipa_ctx->nat_mem.size) {
-		IPAERR("Indx Table rules offset not valid\n");
-		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+		IPAERR_RL("Indx Table rules offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n",
 			init->index_offset, (init->table_entries + 1),
 			tmp, ipa_ctx->nat_mem.size);
 		return -EPERM;
@@ -392,8 +392,8 @@
 
 	/* check for integer overflow */
 	if (init->index_expn_offset >
-		UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
-		IPAERR("Detected overflow\n");
+		(UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) {
+		IPAERR_RL("Detected overflow\n");
 		return -EPERM;
 	}
 	/* Check Expn Table entry offset is not
@@ -402,8 +402,8 @@
 	tmp = init->index_expn_offset +
 		(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
 	if (tmp > ipa_ctx->nat_mem.size) {
-		IPAERR("Indx Expn Table rules offset not valid\n");
-		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+		IPAERR_RL("Indx Expn Table rules offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n",
 			init->index_expn_offset, init->expn_table_entries,
 			tmp, ipa_ctx->nat_mem.size);
 		return -EPERM;
@@ -448,16 +448,16 @@
 				(init->expn_rules_offset > offset) ||
 				(init->index_offset > offset) ||
 				(init->index_expn_offset > offset)) {
-			IPAERR("Failed due to integer overflow\n");
-			IPAERR("nat.mem.dma_handle: 0x%pa\n",
+			IPAERR_RL("Failed due to integer overflow\n");
+			IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
 				&ipa_ctx->nat_mem.dma_handle);
-			IPAERR("ipv4_rules_offset: 0x%x\n",
+			IPAERR_RL("ipv4_rules_offset: 0x%x\n",
 				init->ipv4_rules_offset);
-			IPAERR("expn_rules_offset: 0x%x\n",
+			IPAERR_RL("expn_rules_offset: 0x%x\n",
 				init->expn_rules_offset);
-			IPAERR("index_offset: 0x%x\n",
+			IPAERR_RL("index_offset: 0x%x\n",
 				init->index_offset);
-			IPAERR("index_expn_offset: 0x%x\n",
+			IPAERR_RL("index_expn_offset: 0x%x\n",
 				init->index_expn_offset);
 			result = -EPERM;
 			goto free_mem;
@@ -513,7 +513,7 @@
 	desc[1].len = size;
 	IPADBG("posting v4 init command\n");
 	if (ipa_send_cmd(2, desc)) {
-		IPAERR("Fail to send immediate command\n");
+		IPAERR_RL("Fail to send immediate command\n");
 		result = -EPERM;
 		goto free_mem;
 	}
@@ -578,7 +578,7 @@
 
 	IPADBG("\n");
 	if (dma->entries <= 0) {
-		IPAERR("Invalid number of commands %d\n",
+		IPAERR_RL("Invalid number of commands %d\n",
 			dma->entries);
 		ret = -EPERM;
 		goto bail;
@@ -586,7 +586,7 @@
 
 	for (cnt = 0; cnt < dma->entries; cnt++) {
 		if (dma->dma[cnt].table_index >= 1) {
-			IPAERR("Invalid table index %d\n",
+			IPAERR_RL("Invalid table index %d\n",
 				dma->dma[cnt].table_index);
 			ret = -EPERM;
 			goto bail;
@@ -597,7 +597,7 @@
 			if (dma->dma[cnt].offset >=
 				(ipa_ctx->nat_mem.size_base_tables + 1) *
 				NAT_TABLE_ENTRY_SIZE_BYTE) {
-				IPAERR("Invalid offset %d\n",
+				IPAERR_RL("Invalid offset %d\n",
 					dma->dma[cnt].offset);
 				ret = -EPERM;
 				goto bail;
@@ -609,7 +609,7 @@
 			if (dma->dma[cnt].offset >=
 				ipa_ctx->nat_mem.size_expansion_tables *
 				NAT_TABLE_ENTRY_SIZE_BYTE) {
-				IPAERR("Invalid offset %d\n",
+				IPAERR_RL("Invalid offset %d\n",
 					dma->dma[cnt].offset);
 				ret = -EPERM;
 				goto bail;
@@ -621,7 +621,7 @@
 			if (dma->dma[cnt].offset >=
 				(ipa_ctx->nat_mem.size_base_tables + 1) *
 				NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
-				IPAERR("Invalid offset %d\n",
+				IPAERR_RL("Invalid offset %d\n",
 					dma->dma[cnt].offset);
 				ret = -EPERM;
 				goto bail;
@@ -633,7 +633,7 @@
 			if (dma->dma[cnt].offset >=
 				ipa_ctx->nat_mem.size_expansion_tables *
 				NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
-				IPAERR("Invalid offset %d\n",
+				IPAERR_RL("Invalid offset %d\n",
 					dma->dma[cnt].offset);
 				ret = -EPERM;
 				goto bail;
@@ -642,7 +642,7 @@
 			break;
 
 		default:
-			IPAERR("Invalid base_addr %d\n",
+			IPAERR_RL("Invalid base_addr %d\n",
 				dma->dma[cnt].base_addr);
 			ret = -EPERM;
 			goto bail;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 1faa795..0a3c0e5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -850,7 +850,7 @@
 	struct ipa_rt_tbl *entry;
 
 	if (in->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -942,12 +942,12 @@
 	u32 id;
 
 	if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
-		IPAERR("bad parms\n");
+		IPAERR_RL("bad parms\n");
 		return -EINVAL;
 	}
 	id = entry->id;
 	if (ipa_id_find(id) == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		return -EPERM;
 	}
 
@@ -1086,7 +1086,7 @@
 	int ret;
 
 	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1096,7 +1096,7 @@
 					&rules->rules[i].rule,
 					rules->rules[i].at_rear,
 					&rules->rules[i].rt_rule_hdl)) {
-			IPAERR("failed to add rt rule %d\n", i);
+			IPAERR_RL("failed to add rt rule %d\n", i);
 			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
 		} else {
 			rules->rules[i].status = 0;
@@ -1123,12 +1123,12 @@
 	entry = ipa_id_find(rule_hdl);
 
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		return -EINVAL;
 	}
 
 	if (entry->cookie != IPA_RT_RULE_COOKIE) {
-		IPAERR("bad params\n");
+		IPAERR_RL("bad params\n");
 		return -EINVAL;
 	}
 
@@ -1142,7 +1142,7 @@
 			entry->tbl->rule_cnt);
 	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
 		if (__ipa_del_rt_tbl(entry->tbl))
-			IPAERR("fail to del RT tbl\n");
+			IPAERR_RL("fail to del RT tbl\n");
 	}
 	entry->cookie = 0;
 	id = entry->id;
@@ -1169,14 +1169,14 @@
 	int ret;
 
 	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa_ctx->lock);
 	for (i = 0; i < hdls->num_hdls; i++) {
 		if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
-			IPAERR("failed to del rt rule %i\n", i);
+			IPAERR_RL("failed to del rt rule %i\n", i);
 			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
 		} else {
 			hdls->hdl[i].status = 0;
@@ -1209,7 +1209,7 @@
 	int ret;
 
 	if (ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1253,7 +1253,7 @@
 	int id;
 
 	if (ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1271,7 +1271,7 @@
 	 * filtering rules point to routing tables
 	 */
 	if (ipa2_reset_flt(ip))
-		IPAERR("fail to reset flt ip=%d\n", ip);
+		IPAERR_RL("fail to reset flt ip=%d\n", ip);
 
 	set = &ipa_ctx->rt_tbl_set[ip];
 	rset = &ipa_ctx->reap_rt_tbl_set[ip];
@@ -1357,7 +1357,7 @@
 	int result = -EFAULT;
 
 	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 	mutex_lock(&ipa_ctx->lock);
@@ -1368,7 +1368,7 @@
 
 		/* commit for get */
 		if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
-			IPAERR("fail to commit RT tbl\n");
+			IPAERR_RL("fail to commit RT tbl\n");
 
 		result = 0;
 	}
@@ -1394,13 +1394,13 @@
 	mutex_lock(&ipa_ctx->lock);
 	entry = ipa_id_find(rt_tbl_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		result = -EINVAL;
 		goto ret;
 	}
 
 	if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
-		IPAERR("bad parms\n");
+		IPAERR_RL("bad parms\n");
 		result = -EINVAL;
 		goto ret;
 	}
@@ -1418,10 +1418,10 @@
 	entry->ref_cnt--;
 	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
 		if (__ipa_del_rt_tbl(entry))
-			IPAERR("fail to del RT tbl\n");
+			IPAERR_RL("fail to del RT tbl\n");
 		/* commit for put */
 		if (ipa_ctx->ctrl->ipa_commit_rt(ip))
-			IPAERR("fail to commit RT tbl\n");
+			IPAERR_RL("fail to commit RT tbl\n");
 	}
 
 	result = 0;
@@ -1441,19 +1441,19 @@
 	if (rtrule->rule.hdr_hdl) {
 		hdr = ipa_id_find(rtrule->rule.hdr_hdl);
 		if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
-			IPAERR("rt rule does not point to valid hdr\n");
+			IPAERR_RL("rt rule does not point to valid hdr\n");
 			goto error;
 		}
 	}
 
 	entry = ipa_id_find(rtrule->rt_rule_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		goto error;
 	}
 
 	if (entry->cookie != IPA_RT_RULE_COOKIE) {
-		IPAERR("bad params\n");
+		IPAERR_RL("bad params\n");
 		goto error;
 	}
 
@@ -1486,14 +1486,14 @@
 	int result;
 
 	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa_ctx->lock);
 	for (i = 0; i < hdls->num_rules; i++) {
 		if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
-			IPAERR("failed to mdfy rt rule %i\n", i);
+			IPAERR_RL("failed to mdfy rt rule %i\n", i);
 			hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
 		} else {
 			hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index b7815cb..a454382 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1673,7 +1673,7 @@
 
 	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
 	    ipa_ctx->ep[clnt_hdl].valid == 0) {
-		IPAERR("bad parm, %d\n", clnt_hdl);
+		IPAERR_RL("bad parm, %d\n", clnt_hdl);
 		return -EINVAL;
 	}
 
@@ -1686,7 +1686,7 @@
 	ep = &ipa_ctx->ep[clnt_hdl];
 
 	if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
-		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
 		return -EFAULT;
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index b133f9c..2c88244 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -847,7 +847,7 @@
 	}
 
 	if (client >= IPA_CLIENT_MAX || client < 0) {
-		IPAERR("Bad client number! client =%d\n", client);
+		IPAERR_RL("Bad client number! client =%d\n", client);
 		return INVALID_EP_MAPPING_INDEX;
 	}
 
@@ -1700,7 +1700,7 @@
 		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
 		    attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
 		    IPA_FLT_FLOW_LABEL) {
-			IPAERR("v6 attrib's specified for v4 rule\n");
+			IPAERR_RL("v6 attrib's specified for v4 rule\n");
 			return -EPERM;
 		}
 
@@ -1712,7 +1712,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
 			if (ipa_ofst_meq32[ofst_meq32] == -1) {
-				IPAERR("ran out of meq32 eq\n");
+				IPAERR_RL("ran out of meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1732,7 +1732,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
 			if (ipa_ofst_meq32[ofst_meq32] == -1) {
-				IPAERR("ran out of meq32 eq\n");
+				IPAERR_RL("ran out of meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1746,7 +1746,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
 			if (ipa_ofst_meq32[ofst_meq32] == -1) {
-				IPAERR("ran out of meq32 eq\n");
+				IPAERR_RL("ran out of meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1760,11 +1760,11 @@
 
 		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
-				IPAERR("ran out of ihl_rng16 eq\n");
+				IPAERR_RL("ran out of ihl_rng16 eq\n");
 				return -EPERM;
 			}
 			if (attrib->src_port_hi < attrib->src_port_lo) {
-				IPAERR("bad src port range param\n");
+				IPAERR_RL("bad src port range param\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1778,11 +1778,11 @@
 
 		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
-				IPAERR("ran out of ihl_rng16 eq\n");
+				IPAERR_RL("ran out of ihl_rng16 eq\n");
 				return -EPERM;
 			}
 			if (attrib->dst_port_hi < attrib->dst_port_lo) {
-				IPAERR("bad dst port range param\n");
+				IPAERR_RL("bad dst port range param\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1796,7 +1796,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_TYPE) {
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
-				IPAERR("ran out of ihl_meq32 eq\n");
+				IPAERR_RL("ran out of ihl_meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1809,7 +1809,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_CODE) {
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
-				IPAERR("ran out of ihl_meq32 eq\n");
+				IPAERR_RL("ran out of ihl_meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1822,7 +1822,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_SPI) {
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
-				IPAERR("ran out of ihl_meq32 eq\n");
+				IPAERR_RL("ran out of ihl_meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1836,7 +1836,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
-				IPAERR("ran out of ihl_rng16 eq\n");
+				IPAERR_RL("ran out of ihl_rng16 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1850,7 +1850,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
-				IPAERR("ran out of ihl_rng16 eq\n");
+				IPAERR_RL("ran out of ihl_rng16 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1877,7 +1877,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1892,7 +1892,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1907,7 +1907,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1922,7 +1922,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1937,7 +1937,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
 			if (ipa_ofst_meq32[ofst_meq32] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1955,7 +1955,7 @@
 		/* error check */
 		if (attrib->attrib_mask & IPA_FLT_TOS ||
 		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
-			IPAERR("v4 attrib's specified for v6 rule\n");
+			IPAERR_RL("v4 attrib's specified for v6 rule\n");
 			return -EPERM;
 		}
 
@@ -1967,7 +1967,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_TYPE) {
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
-				IPAERR("ran out of ihl_meq32 eq\n");
+				IPAERR_RL("ran out of ihl_meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1980,7 +1980,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_CODE) {
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
-				IPAERR("ran out of ihl_meq32 eq\n");
+				IPAERR_RL("ran out of ihl_meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1993,7 +1993,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_SPI) {
 			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
-				IPAERR("ran out of ihl_meq32 eq\n");
+				IPAERR_RL("ran out of ihl_meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2007,7 +2007,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
-				IPAERR("ran out of ihl_rng16 eq\n");
+				IPAERR_RL("ran out of ihl_rng16 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2021,7 +2021,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
-				IPAERR("ran out of ihl_rng16 eq\n");
+				IPAERR_RL("ran out of ihl_rng16 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2035,11 +2035,11 @@
 
 		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
-				IPAERR("ran out of ihl_rng16 eq\n");
+				IPAERR_RL("ran out of ihl_rng16 eq\n");
 				return -EPERM;
 			}
 			if (attrib->src_port_hi < attrib->src_port_lo) {
-				IPAERR("bad src port range param\n");
+				IPAERR_RL("bad src port range param\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2053,11 +2053,11 @@
 
 		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
 			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
-				IPAERR("ran out of ihl_rng16 eq\n");
+				IPAERR_RL("ran out of ihl_rng16 eq\n");
 				return -EPERM;
 			}
 			if (attrib->dst_port_hi < attrib->dst_port_lo) {
-				IPAERR("bad dst port range param\n");
+				IPAERR_RL("bad dst port range param\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2071,7 +2071,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2097,7 +2097,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2129,7 +2129,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2174,7 +2174,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2189,7 +2189,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2204,7 +2204,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2219,7 +2219,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
 			if (ipa_ofst_meq128[ofst_meq128] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq128 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2234,7 +2234,7 @@
 
 		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
 			if (ipa_ofst_meq32[ofst_meq32] == -1) {
-				IPAERR("ran out of meq128 eq\n");
+				IPAERR_RL("ran out of meq32 eq\n");
 				return -EPERM;
 			}
 			*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -2247,7 +2247,7 @@
 		}
 
 	} else {
-		IPAERR("unsupported ip %d\n", ip);
+		IPAERR_RL("unsupported ip %d\n", ip);
 		return -EPERM;
 	}
 
@@ -2257,7 +2257,7 @@
 	 */
 	if (attrib->attrib_mask == 0) {
 		if (ipa_ofst_meq32[ofst_meq32] == -1) {
-			IPAERR("ran out of meq32 eq\n");
+			IPAERR_RL("ran out of meq32 eq\n");
 			return -EPERM;
 		}
 		*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -3548,19 +3548,19 @@
 	}
 
 	if (param_in->client  >= IPA_CLIENT_MAX) {
-		IPAERR("bad parm client:%d\n", param_in->client);
+		IPAERR_RL("bad parm client:%d\n", param_in->client);
 		goto fail;
 	}
 
 	ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
 	if (ipa_ep_idx == -1) {
-		IPAERR("Invalid client.\n");
+		IPAERR_RL("Invalid client.\n");
 		goto fail;
 	}
 
 	ep = &ipa_ctx->ep[ipa_ep_idx];
 	if (!ep->valid) {
-		IPAERR("EP not allocated.\n");
+		IPAERR_RL("EP not allocated.\n");
 		goto fail;
 	}
 
@@ -3574,7 +3574,7 @@
 		ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
 		result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
 		if (result)
-			IPAERR("qmap_id %d write failed on ep=%d\n",
+			IPAERR_RL("qmap_id %d write failed on ep=%d\n",
 					meta.qmap_id, ipa_ep_idx);
 		result = 0;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index a4faaea..e3f8d45 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -3,6 +3,7 @@
 obj-$(CONFIG_IPA3) += ipat.o
 ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
 	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
-	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \
+	ipa_hw_stats.o
 
 obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 4b056f6c..97f2668 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -266,7 +266,9 @@
 	int cnt = 0;
 	int start_idx;
 	int end_idx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
 	start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
 	end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
@@ -277,6 +279,8 @@
 				.log_buffer[i]);
 		cnt += nbytes;
 	}
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 
 	return cnt;
 }
@@ -286,7 +290,9 @@
 	int i;
 	struct ipa3_active_client_htable_entry *iterator;
 	int cnt = 0;
+	unsigned long flags;
 
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
 	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
 	hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
 			iterator, list) {
@@ -319,6 +325,8 @@
 	cnt += scnprintf(buf + cnt, size - cnt,
 			"\nTotal active clients count: %d\n",
 			atomic_read(&ipa3_ctx->ipa3_active_clients.cnt));
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 
 	return cnt;
 }
@@ -368,6 +376,7 @@
 {
 	int i;
 
+	spin_lock_init(&ipa3_ctx->ipa3_active_clients_logging.lock);
 	ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
 			sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
@@ -399,20 +408,28 @@
 
 void ipa3_active_clients_log_clear(void)
 {
-	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
 	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
 	ipa3_ctx->ipa3_active_clients_logging.log_tail =
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
-	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 }
 
 static void ipa3_active_clients_log_destroy(void)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
 	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
 	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
 	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
 	ipa3_ctx->ipa3_active_clients_logging.log_tail =
 			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 }
 
 enum ipa_smmu_cb_type {
@@ -542,7 +559,7 @@
 	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
 	retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
 	if (retval) {
-		IPAERR("ipa3_send_msg failed: %d\n", retval);
+		IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
 		kfree(wan_msg);
 		return retval;
 	}
@@ -719,7 +736,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_nat_dma_cmd *)param)->entries,
 				pre_entry);
 			retval = -EFAULT;
@@ -778,7 +795,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
 				pre_entry);
 			retval = -EFAULT;
@@ -817,7 +834,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_del_hdr *)param)->num_hdls,
 				pre_entry);
 			retval = -EFAULT;
@@ -857,7 +874,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_rt_rule *)param)->
 				num_rules,
 				pre_entry);
@@ -897,7 +914,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
 			num_rules != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_rt_rule_after *)param)->
 				num_rules,
 				pre_entry);
@@ -939,7 +956,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_mdfy_rt_rule *)param)->
 				num_rules,
 				pre_entry);
@@ -979,7 +996,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
 				pre_entry);
 			retval = -EFAULT;
@@ -1018,7 +1035,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_flt_rule *)param)->
 				num_rules,
 				pre_entry);
@@ -1060,7 +1077,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
 			num_rules != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_flt_rule_after *)param)->
 				num_rules,
 				pre_entry);
@@ -1101,7 +1118,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_del_flt_rule *)param)->
 				num_hdls,
 				pre_entry);
@@ -1141,7 +1158,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_mdfy_flt_rule *)param)->
 				num_rules,
 				pre_entry);
@@ -1279,7 +1296,7 @@
 		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
 			param)->num_tx_props
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_query_intf_tx_props *)
 				param)->num_tx_props, pre_entry);
 			retval = -EFAULT;
@@ -1324,7 +1341,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
 			param)->num_rx_props != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_query_intf_rx_props *)
 				param)->num_rx_props, pre_entry);
 			retval = -EFAULT;
@@ -1369,7 +1386,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
 			param)->num_ext_props != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_query_intf_ext_props *)
 				param)->num_ext_props, pre_entry);
 			retval = -EFAULT;
@@ -1407,7 +1424,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
 			!= pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_msg_meta *)param)->msg_len,
 				pre_entry);
 			retval = -EFAULT;
@@ -1547,7 +1564,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
 			param)->num_proc_ctxs != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_add_hdr_proc_ctx *)
 				param)->num_proc_ctxs, pre_entry);
 			retval = -EFAULT;
@@ -1586,7 +1603,7 @@
 		/* add check in case user-space module compromised */
 		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
 			param)->num_hdls != pre_entry)) {
-			IPAERR("current %d pre %d\n",
+			IPAERR_RL("current %d pre %d\n",
 				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
 				num_hdls,
 				pre_entry);
@@ -3402,7 +3419,10 @@
 	struct ipa3_active_client_htable_entry *hfound;
 	u32 hkey;
 	char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+	unsigned long flags;
 
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients_logging.lock, flags);
+	int_ctx = true;
 	hfound = NULL;
 	memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
 	strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
@@ -3422,6 +3442,9 @@
 				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
 		if (hentry == NULL) {
 			IPAERR("failed allocating active clients hash entry");
+			spin_unlock_irqrestore(
+				&ipa3_ctx->ipa3_active_clients_logging.lock,
+				flags);
 			return;
 		}
 		hentry->type = id->type;
@@ -3446,6 +3469,8 @@
 				id->id_string, id->file, id->line);
 		ipa3_active_clients_log_insert(temp_str);
 	}
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients_logging.lock,
+		flags);
 }
 
 void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
@@ -3922,14 +3947,17 @@
 	int i;
 	struct ipa3_flt_tbl *flt_tbl;
 
+	idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
+	idr_destroy(&ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
+
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		if (!ipa_is_ep_support_flt(i))
 			continue;
 
 		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
-		idr_destroy(&flt_tbl->rule_ids);
+		flt_tbl->rule_ids = NULL;
 		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
-		idr_destroy(&flt_tbl->rule_ids);
+		flt_tbl->rule_ids = NULL;
 	}
 }
 
@@ -4104,6 +4132,7 @@
 	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
 	struct ipa3_flt_tbl *flt_tbl;
 	int i;
+	struct idr *idr;
 
 	if (ipa3_ctx == NULL) {
 		IPADBG("IPA driver haven't initialized\n");
@@ -4127,6 +4156,11 @@
 	/* Assign resource limitation to each group */
 	ipa3_set_resorce_groups_min_max_limits();
 
+	idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v4]);
+	idr_init(idr);
+	idr = &(ipa3_ctx->flt_rule_ids[IPA_IP_v6]);
+	idr_init(idr);
+
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		if (!ipa_is_ep_support_flt(i))
 			continue;
@@ -4137,7 +4171,7 @@
 			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
 		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
 			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
-		idr_init(&flt_tbl->rule_ids);
+		flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v4];
 
 		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
 		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
@@ -4145,7 +4179,7 @@
 			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
 		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
 			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
-		idr_init(&flt_tbl->rule_ids);
+		flt_tbl->rule_ids = &ipa3_ctx->flt_rule_ids[IPA_IP_v6];
 	}
 
 	if (!ipa3_ctx->apply_rg10_wa) {
@@ -4230,6 +4264,12 @@
 	else
 		IPADBG(":ntn init ok\n");
 
+	result = ipa_hw_stats_init();
+	if (result)
+		IPAERR("fail to init stats %d\n", result);
+	else
+		IPADBG(":stats init ok\n");
+
 	ipa3_register_panic_hdlr();
 
 	ipa3_ctx->q6_proxy_clk_vote_valid = true;
@@ -4790,12 +4830,16 @@
 				hdr_proc_ctx_tbl.head_free_offset_list[i]);
 	}
 	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+	idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
 	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+	idr_init(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
 
 	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
 	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	idr_init(&rset->rule_ids);
 	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
 	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	idr_init(&rset->rule_ids);
 
 	INIT_LIST_HEAD(&ipa3_ctx->intf_list);
 	INIT_LIST_HEAD(&ipa3_ctx->msg_list);
@@ -4915,6 +4959,12 @@
 fail_device_create:
 	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
 fail_alloc_chrdev_region:
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
+	idr_destroy(&rset->rule_ids);
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
+	idr_destroy(&rset->rule_ids);
+	idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].rule_ids);
+	idr_destroy(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].rule_ids);
 	ipa3_free_dma_task_for_gsi();
 fail_dma_task:
 	idr_destroy(&ipa3_ctx->ipa_idr);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index f172dc4..9486b0a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -2169,6 +2169,8 @@
 		goto fail;
 	}
 
+	ipa_debugfs_init_stats(dent);
+
 	return;
 
 fail:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index b1e50ac..beca549 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -806,7 +806,7 @@
 	if (rule->rule_id) {
 		id = rule->rule_id;
 	} else {
-		id = ipa3_alloc_rule_id(&tbl->rule_ids);
+		id = ipa3_alloc_rule_id(tbl->rule_ids);
 		if (id < 0) {
 			IPAERR("failed to allocate rule id\n");
 			WARN_ON(1);
@@ -880,7 +880,7 @@
 	list_del(&entry->link);
 	/* if rule id was allocated from idr, remove it */
 	if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
-		idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
 	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
 
 error:
@@ -900,7 +900,7 @@
 		goto error;
 
 	if (rule == NULL || rule_hdl == NULL) {
-		IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
+		IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule,
 				rule_hdl);
 		goto error;
 	}
@@ -927,7 +927,7 @@
 	list_del(&entry->link);
 	/* if rule id was allocated from idr, remove it */
 	if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
-		idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
 	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
 
 error:
@@ -942,12 +942,12 @@
 
 	entry = ipa3_id_find(rule_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		return -EINVAL;
 	}
 
 	if (entry->cookie != IPA_FLT_COOKIE) {
-		IPAERR("bad params\n");
+		IPAERR_RL("bad params\n");
 		return -EINVAL;
 	}
 	id = entry->id;
@@ -961,7 +961,7 @@
 	entry->cookie = 0;
 	/* if rule id was allocated from idr, remove it */
 	if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
-		idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+		idr_remove(entry->tbl->rule_ids, entry->rule_id);
 
 	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
 
@@ -979,12 +979,12 @@
 
 	entry = ipa3_id_find(frule->rule_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		goto error;
 	}
 
 	if (entry->cookie != IPA_FLT_COOKIE) {
-		IPAERR("bad params\n");
+		IPAERR_RL("bad params\n");
 		goto error;
 	}
 
@@ -994,25 +994,25 @@
 	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
 		if (!frule->rule.eq_attrib_type) {
 			if (!frule->rule.rt_tbl_hdl) {
-				IPAERR("invalid RT tbl\n");
+				IPAERR_RL("invalid RT tbl\n");
 				goto error;
 			}
 
 			rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
 			if (rt_tbl == NULL) {
-				IPAERR("RT tbl not found\n");
+				IPAERR_RL("RT tbl not found\n");
 				goto error;
 			}
 
 			if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
-				IPAERR("RT table cookie is invalid\n");
+				IPAERR_RL("RT table cookie is invalid\n");
 				goto error;
 			}
 		} else {
 			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
 				IPA_MEM_PART(v4_modem_rt_index_hi) :
 				IPA_MEM_PART(v6_modem_rt_index_hi))) {
-				IPAERR("invalid RT tbl\n");
+				IPAERR_RL("invalid RT tbl\n");
 				goto error;
 			}
 		}
@@ -1057,7 +1057,7 @@
 	int ipa_ep_idx;
 
 	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
-		IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+		IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
 				rule_hdl, ep);
 
 		return -EINVAL;
@@ -1087,7 +1087,7 @@
 
 	if (rules == NULL || rules->num_rules == 0 ||
 			rules->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1102,7 +1102,7 @@
 			result = -1;
 
 		if (result) {
-			IPAERR("failed to add flt rule %d\n", i);
+			IPAERR_RL("failed to add flt rule %d\n", i);
 			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
 		} else {
 			rules->rules[i].status = 0;
@@ -1110,7 +1110,7 @@
 	}
 
 	if (rules->global) {
-		IPAERR("no support for global filter rules\n");
+		IPAERR_RL("no support for global filter rules\n");
 		result = -EPERM;
 		goto bail;
 	}
@@ -1145,12 +1145,12 @@
 
 	if (rules == NULL || rules->num_rules == 0 ||
 			rules->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	if (rules->ep >= IPA_CLIENT_MAX) {
-		IPAERR("bad parms ep=%d\n", rules->ep);
+		IPAERR_RL("bad parms ep=%d\n", rules->ep);
 		return -EINVAL;
 	}
 
@@ -1165,20 +1165,20 @@
 
 	entry = ipa3_id_find(rules->add_after_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		result = -EINVAL;
 		goto bail;
 	}
 
 	if (entry->tbl != tbl) {
-		IPAERR("given entry does not match the table\n");
+		IPAERR_RL("given entry does not match the table\n");
 		result = -EINVAL;
 		goto bail;
 	}
 
 	if (tbl->sticky_rear)
 		if (&entry->link == tbl->head_flt_rule_list.prev) {
-			IPAERR("cannot add rule at end of a sticky table");
+			IPAERR_RL("cannot add rule at end of a sticky table");
 			result = -EINVAL;
 			goto bail;
 		}
@@ -1200,7 +1200,7 @@
 				&entry);
 
 		if (result) {
-			IPAERR("failed to add flt rule %d\n", i);
+			IPAERR_RL("failed to add flt rule %d\n", i);
 			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
 		} else {
 			rules->rules[i].status = 0;
@@ -1234,14 +1234,14 @@
 	int result;
 
 	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa3_ctx->lock);
 	for (i = 0; i < hdls->num_hdls; i++) {
 		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
-			IPAERR("failed to del flt rule %i\n", i);
+			IPAERR_RL("failed to del flt rule %i\n", i);
 			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
 		} else {
 			hdls->hdl[i].status = 0;
@@ -1274,14 +1274,14 @@
 	int result;
 
 	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa3_ctx->lock);
 	for (i = 0; i < hdls->num_rules; i++) {
 		if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
-			IPAERR("failed to mdfy flt rule %i\n", i);
+			IPAERR_RL("failed to mdfy flt rule %i\n", i);
 			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
 		} else {
 			hdls->rules[i].status = 0;
@@ -1315,7 +1315,7 @@
 	int result;
 
 	if (ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1351,7 +1351,7 @@
 	int id;
 
 	if (ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1374,7 +1374,7 @@
 				entry->rt_tbl->ref_cnt--;
 			/* if rule id was allocated from idr, remove it */
 			if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
-				idr_remove(&entry->tbl->rule_ids,
+				idr_remove(entry->tbl->rule_ids,
 					entry->rule_id);
 			entry->cookie = 0;
 			id = entry->id;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 0c1832c..da7bcd0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -327,17 +327,17 @@
 		proc_ctx->type, proc_ctx->hdr_hdl);
 
 	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
-		IPAERR("invalid processing type %d\n", proc_ctx->type);
+		IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
 		return -EINVAL;
 	}
 
 	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
 	if (!hdr_entry) {
-		IPAERR("hdr_hdl is invalid\n");
+		IPAERR_RL("hdr_hdl is invalid\n");
 		return -EINVAL;
 	}
 	if (hdr_entry->cookie != IPA_HDR_COOKIE) {
-		IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
+		IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -367,7 +367,7 @@
 			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
 		bin = IPA_HDR_PROC_CTX_BIN1;
 	} else {
-		IPAERR("unexpected needed len %d\n", needed_len);
+		IPAERR_RL("unexpected needed len %d\n", needed_len);
 		WARN_ON(1);
 		goto bad_len;
 	}
@@ -377,7 +377,7 @@
 		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
 	if (list_empty(&htbl->head_free_offset_list[bin])) {
 		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
-			IPAERR("hdr proc ctx table overflow\n");
+			IPAERR_RL("hdr proc ctx table overflow\n");
 			goto bad_len;
 		}
 
@@ -450,12 +450,12 @@
 	int mem_size;
 
 	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		goto error;
 	}
 
 	if (!HDR_TYPE_IS_VALID(hdr->type)) {
-		IPAERR("invalid hdr type %d\n", hdr->type);
+		IPAERR_RL("invalid hdr type %d\n", hdr->type);
 		goto error;
 	}
 
@@ -487,7 +487,7 @@
 	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
 		bin = IPA_HDR_BIN4;
 	else {
-		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+		IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
 		goto bad_hdr_len;
 	}
 
@@ -609,7 +609,7 @@
 
 	entry = ipa3_id_find(proc_ctx_hdl);
 	if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -617,7 +617,7 @@
 		htbl->proc_ctx_cnt, entry->offset_entry->offset);
 
 	if (by_user && entry->user_deleted) {
-		IPAERR("proc_ctx already deleted by user\n");
+		IPAERR_RL("proc_ctx already deleted by user\n");
 		return -EINVAL;
 	}
 
@@ -655,12 +655,12 @@
 
 	entry = ipa3_id_find(hdr_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		return -EINVAL;
 	}
 
 	if (entry->cookie != IPA_HDR_COOKIE) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -673,7 +673,7 @@
 			entry->offset_entry->offset);
 
 	if (by_user && entry->user_deleted) {
-		IPAERR("proc_ctx already deleted by user\n");
+		IPAERR_RL("hdr already deleted by user\n");
 		return -EINVAL;
 	}
 
@@ -722,7 +722,7 @@
 	int result = -EFAULT;
 
 	if (hdrs == NULL || hdrs->num_hdrs == 0) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -731,7 +731,7 @@
 			hdrs->num_hdrs);
 	for (i = 0; i < hdrs->num_hdrs; i++) {
 		if (__ipa_add_hdr(&hdrs->hdr[i])) {
-			IPAERR("failed to add hdr %d\n", i);
+			IPAERR_RL("failed to add hdr %d\n", i);
 			hdrs->hdr[i].status = -1;
 		} else {
 			hdrs->hdr[i].status = 0;
@@ -767,14 +767,14 @@
 	int result = -EFAULT;
 
 	if (hdls == NULL || hdls->num_hdls == 0) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa3_ctx->lock);
 	for (i = 0; i < hdls->num_hdls; i++) {
 		if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
-			IPAERR("failed to del hdr %i\n", i);
+			IPAERR_RL("failed to del hdr %i\n", i);
 			hdls->hdl[i].status = -1;
 		} else {
 			hdls->hdl[i].status = 0;
@@ -822,7 +822,7 @@
 	int result = -EFAULT;
 
 	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -831,7 +831,7 @@
 			proc_ctxs->num_proc_ctxs);
 	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
 		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
-			IPAERR("failed to add hdr pric ctx %d\n", i);
+			IPAERR_RL("failed to add hdr proc ctx %d\n", i);
 			proc_ctxs->proc_ctx[i].status = -1;
 		} else {
 			proc_ctxs->proc_ctx[i].status = 0;
@@ -869,14 +869,14 @@
 	int result;
 
 	if (hdls == NULL || hdls->num_hdls == 0) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa3_ctx->lock);
 	for (i = 0; i < hdls->num_hdls; i++) {
 		if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
-			IPAERR("failed to del hdr %i\n", i);
+			IPAERR_RL("failed to del hdr %i\n", i);
 			hdls->hdl[i].status = -1;
 		} else {
 			hdls->hdl[i].status = 0;
@@ -1083,7 +1083,7 @@
 	struct ipa3_hdr_entry *entry;
 
 	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
-		IPAERR("Header name too long: %s\n", name);
+		IPAERR_RL("Header name too long: %s\n", name);
 		return NULL;
 	}
 
@@ -1113,7 +1113,7 @@
 	int result = -1;
 
 	if (lookup == NULL) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 	mutex_lock(&ipa3_ctx->lock);
@@ -1200,13 +1200,13 @@
 
 	entry = ipa3_id_find(hdr_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		result = -EINVAL;
 		goto bail;
 	}
 
 	if (entry->cookie != IPA_HDR_COOKIE) {
-		IPAERR("invalid header entry\n");
+		IPAERR_RL("invalid header entry\n");
 		result = -EINVAL;
 		goto bail;
 	}
@@ -1235,7 +1235,7 @@
 	int result = -EFAULT;
 
 	if (copy == NULL) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 	mutex_lock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
new file mode 100644
index 0000000..d8785ed
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
@@ -0,0 +1,1973 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_hw_stats.h"
+
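+/*
+ * Map an IPA client to its bit in a 32-bit pipe bitmask; evaluates to 0 when
+ * the client has no valid pipe mapping in range.
+ */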
+#define IPA_CLIENT_BIT_32(client) \
+	((ipa3_get_ep_mapping(client) >= 0 && \
+		ipa3_get_ep_mapping(client) < IPA_STATS_MAX_PIPE_BIT) ? \
+		(1 << ipa3_get_ep_mapping(client)) : 0)
+
+int ipa_hw_stats_init(void)
+{
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+		return 0;
+
+	/* initialize stats here */
+	ipa3_ctx->hw_stats.enabled = true;
+	return 0;
+}
+
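+/*
+ * ipa_init_quota_stats() - zero the quota stats region in IPA SRAM and
+ * program the quota mask/base registers for the given pipe bitmask. The two
+ * register writes and the stats payload DMA are sent as a single atomic
+ * sequence of immediate commands.
+ */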
+int ipa_init_quota_stats(u32 pipe_bitmask)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write quota_base = {0};
+	struct ipahal_imm_cmd_pyld *quota_base_pyld;
+	struct ipahal_imm_cmd_register_write quota_mask = {0};
+	struct ipahal_imm_cmd_pyld *quota_mask_pyld;
+	struct ipa3_desc desc[3] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reset driver's cache */
+	memset(&ipa3_ctx->hw_stats.quota, 0, sizeof(ipa3_ctx->hw_stats.quota));
+	ipa3_ctx->hw_stats.quota.init.enabled_bitmask = pipe_bitmask;
+	IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask);
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA,
+		&ipa3_ctx->hw_stats.quota.init, false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	if (pyld->len > IPA_MEM_PART(stats_quota_size)) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			IPA_MEM_PART(stats_quota_size), pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* setting the registers and init the stats pyld are done atomically */
+	quota_mask.skip_pipeline_clear = false;
+	quota_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	quota_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_MASK_n,
+		ipa3_ctx->ee);
+	quota_mask.value = pipe_bitmask;
+	quota_mask.value_mask = ~0;
+	quota_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&quota_mask, false);
+	if (!quota_mask_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto unmap;
+	}
+	desc[0].opcode = quota_mask_pyld->opcode;
+	desc[0].pyld = quota_mask_pyld->data;
+	desc[0].len = quota_mask_pyld->len;
+	desc[0].type = IPA_IMM_CMD_DESC;
+
+	quota_base.skip_pipeline_clear = false;
+	quota_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	quota_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n,
+		ipa3_ctx->ee);
+	quota_base.value = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_quota_ofst);
+	quota_base.value_mask = ~0;
+	quota_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&quota_base, false);
+	if (!quota_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_quota_mask;
+	}
+	desc[1].opcode = quota_base_pyld->opcode;
+	desc[1].pyld = quota_base_pyld->data;
+	desc[1].len = quota_base_pyld->len;
+	desc[1].type = IPA_IMM_CMD_DESC;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(stats_quota_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_quota_base;
+	}
+	desc[2].opcode = cmd_pyld->opcode;
+	desc[2].pyld = cmd_pyld->data;
+	desc[2].len = cmd_pyld->len;
+	desc[2].type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(3, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_quota_base:
+	ipahal_destroy_imm_cmd(quota_base_pyld);
+destroy_quota_mask:
+	ipahal_destroy_imm_cmd(quota_mask_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
+
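+/*
+ * Read the quota stats from SRAM with clear_after_read set, so the
+ * hardware counters are zeroed by the read itself. The parsed values
+ * are accumulated into the driver cache; when out is not NULL the
+ * cached totals are copied back to the caller.
+ */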
+int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
+{
+	int i;
+	int ret;
+	struct ipahal_stats_get_offset_quota get_offset = { { 0 } };
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc = { 0 };
+	struct ipahal_stats_quota_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	get_offset.init = ipa3_ctx->hw_stats.quota.init;
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		return ret;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA memory\n");
+		return -ENOMEM;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = true;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_quota_ofst) + offset.offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(1, &desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats) {
+		IPADBG("failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA,
+		&ipa3_ctx->hw_stats.quota.init, mem.base, stats);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto free_stats;
+	}
+
+	/*
+	 * update driver cache.
+	 * the stats were read from hardware with clear_after_read meaning
+	 * hardware stats are 0 now
+	 */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES)
+			continue;
+
+		if (ipa3_ctx->ep[ep_idx].client != i)
+			continue;
+
+		ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_bytes +=
+			stats->stats[ep_idx].num_ipv4_bytes;
+		ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv4_pkts +=
+			stats->stats[ep_idx].num_ipv4_pkts;
+		ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_bytes +=
+			stats->stats[ep_idx].num_ipv6_bytes;
+		ipa3_ctx->hw_stats.quota.stats.client[i].num_ipv6_pkts +=
+			stats->stats[ep_idx].num_ipv6_pkts;
+	}
+
+	/* copy results to out parameter */
+	if (out)
+		*out = ipa3_ctx->hw_stats.quota.stats;
+	ret = 0;
+free_stats:
+	kfree(stats);
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return ret;
+
+}
+
+int ipa_reset_quota_stats(enum ipa_client_type client)
+{
+	int ret;
+	struct ipa_quota_stats *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (client >= IPA_CLIENT_MAX) {
+		IPAERR("invalid client %d\n", client);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_quota_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_quota_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.quota.stats.client[client];
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+int ipa_reset_all_quota_stats(void)
+{
+	int ret;
+	struct ipa_quota_stats_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_quota_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_quota_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.quota.stats;
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
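+/*
+ * Program the tethering stats region in IPA SRAM. Every producer set
+ * in in->prod_mask must have at least one consumer in its dst_ep_mask
+ * entry. The mask register, base register and SRAM init payload are
+ * updated in a single atomic transaction, as for the quota stats.
+ */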
+int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write teth_base = {0};
+	struct ipahal_imm_cmd_pyld *teth_base_pyld;
+	struct ipahal_imm_cmd_register_write teth_mask = { 0 };
+	struct ipahal_imm_cmd_pyld *teth_mask_pyld;
+	struct ipa3_desc desc[3] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+	int i;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (!in || !in->prod_mask) {
+		IPAERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < IPA_STATS_MAX_PIPE_BIT; i++) {
+		if ((in->prod_mask & (1 << i)) && !in->dst_ep_mask[i]) {
+			IPAERR("prod %d doesn't have cons\n", i);
+			return -EINVAL;
+		}
+	}
+	IPADBG_LOW("prod_mask=0x%x\n", in->prod_mask);
+
+	/* reset driver's cache */
+	memset(&ipa3_ctx->hw_stats.teth.init, 0,
+		sizeof(ipa3_ctx->hw_stats.teth.init));
+	for (i = 0; i < IPA_CLIENT_MAX; i++)
+		memset(&ipa3_ctx->hw_stats.teth.prod_stats[i], 0,
+			sizeof(ipa3_ctx->hw_stats.teth.prod_stats[i]));
+	ipa3_ctx->hw_stats.teth.init.prod_bitmask = in->prod_mask;
+	memcpy(ipa3_ctx->hw_stats.teth.init.cons_bitmask, in->dst_ep_mask,
+		sizeof(ipa3_ctx->hw_stats.teth.init.cons_bitmask));
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_TETHERING,
+		&ipa3_ctx->hw_stats.teth.init, false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	if (pyld->len > IPA_MEM_PART(stats_tethering_size)) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			IPA_MEM_PART(stats_tethering_size), pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* the register writes and the stats pyld init are done atomically */
+	teth_mask.skip_pipeline_clear = false;
+	teth_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	teth_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_MASK_n,
+		ipa3_ctx->ee);
+	teth_mask.value = in->prod_mask;
+	teth_mask.value_mask = ~0;
+	teth_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&teth_mask, false);
+	if (!teth_mask_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto unmap;
+	}
+	desc[0].opcode = teth_mask_pyld->opcode;
+	desc[0].pyld = teth_mask_pyld->data;
+	desc[0].len = teth_mask_pyld->len;
+	desc[0].type = IPA_IMM_CMD_DESC;
+
+	teth_base.skip_pipeline_clear = false;
+	teth_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	teth_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_TETHERING_BASE_n,
+		ipa3_ctx->ee);
+	teth_base.value = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_tethering_ofst);
+	teth_base.value_mask = ~0;
+	teth_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&teth_base, false);
+	if (!teth_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_teth_mask;
+	}
+	desc[1].opcode = teth_base_pyld->opcode;
+	desc[1].pyld = teth_base_pyld->data;
+	desc[1].len = teth_base_pyld->len;
+	desc[1].type = IPA_IMM_CMD_DESC;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(stats_tethering_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_teth_base;
+	}
+	desc[2].opcode = cmd_pyld->opcode;
+	desc[2].pyld = cmd_pyld->data;
+	desc[2].len = cmd_pyld->len;
+	desc[2].type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(3, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_teth_base:
+	ipahal_destroy_imm_cmd(teth_base_pyld);
+destroy_teth_mask:
+	ipahal_destroy_imm_cmd(teth_mask_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
+
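+/*
+ * Read the tethering stats for all producer/consumer pairs with
+ * clear_after_read set, accumulate them into the driver cache and,
+ * when out is not NULL, return the cached stats of the given prod.
+ */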
+int ipa_get_teth_stats(enum ipa_client_type prod,
+	struct ipa_quota_stats_all *out)
+{
+	int i, j;
+	int ret;
+	struct ipahal_stats_get_offset_tethering get_offset = { { 0 } };
+	struct ipahal_stats_offset offset = {0};
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc = { 0 };
+	struct ipahal_stats_tethering_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (!IPA_CLIENT_IS_PROD(prod) || ipa3_get_ep_mapping(prod) == -1) {
+		IPAERR("invalid prod %d\n", prod);
+		return -EINVAL;
+	}
+
+	get_offset.init = ipa3_ctx->hw_stats.teth.init;
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_TETHERING, &get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		return ret;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA memory\n");
+		return -ENOMEM;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = true;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_tethering_ofst) + offset.offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(1, &desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats) {
+		IPADBG("failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_TETHERING,
+		&ipa3_ctx->hw_stats.teth.init, mem.base, stats);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto free_stats;
+	}
+
+	/*
+	 * update driver cache.
+	 * the stats were read from hardware with clear_after_read meaning
+	 * hardware stats are 0 now
+	 */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		for (j = 0; j < IPA_CLIENT_MAX; j++) {
+			struct ipa_hw_stats_teth *sw_stats =
+				&ipa3_ctx->hw_stats.teth;
+			int prod_idx = ipa3_get_ep_mapping(i);
+			int cons_idx = ipa3_get_ep_mapping(j);
+
+			if (prod_idx == -1 || prod_idx >= IPA3_MAX_NUM_PIPES)
+				continue;
+
+			if (cons_idx == -1 || cons_idx >= IPA3_MAX_NUM_PIPES)
+				continue;
+
+			if (ipa3_ctx->ep[prod_idx].client != i ||
+			    ipa3_ctx->ep[cons_idx].client != j)
+				continue;
+
+			sw_stats->prod_stats[i].client[j].num_ipv4_bytes +=
+				stats->stats[prod_idx][cons_idx].num_ipv4_bytes;
+			sw_stats->prod_stats[i].client[j].num_ipv4_pkts +=
+				stats->stats[prod_idx][cons_idx].num_ipv4_pkts;
+			sw_stats->prod_stats[i].client[j].num_ipv6_bytes +=
+				stats->stats[prod_idx][cons_idx].num_ipv6_bytes;
+			sw_stats->prod_stats[i].client[j].num_ipv6_pkts +=
+				stats->stats[prod_idx][cons_idx].num_ipv6_pkts;
+		}
+	}
+
+	if (!out) {
+		ret = 0;
+		goto free_stats;
+	}
+
+	/* copy results to out parameter */
+	*out = ipa3_ctx->hw_stats.teth.prod_stats[prod];
+
+	ret = 0;
+free_stats:
+	kfree(stats);
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return ret;
+
+}
+
+int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons)
+{
+	int ret;
+	struct ipa_quota_stats *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (!IPA_CLIENT_IS_PROD(prod) || !IPA_CLIENT_IS_CONS(cons)) {
+		IPAERR("invalid prod %d or cons %d\n", prod, cons);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_teth_stats(prod, NULL);
+	if (ret) {
+		IPAERR("ipa_get_teth_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.teth.prod_stats[prod].client[cons];
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod)
+{
+	int ret;
+	int i;
+	struct ipa_quota_stats *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (!IPA_CLIENT_IS_PROD(prod)) {
+		IPAERR("invalid prod %d\n", prod);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_teth_stats(prod, NULL);
+	if (ret) {
+		IPAERR("ipa_get_teth_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		stats = &ipa3_ctx->hw_stats.teth.prod_stats[prod].client[i];
+		memset(stats, 0, sizeof(*stats));
+	}
+
+	return 0;
+}
+
+int ipa_reset_all_teth_stats(void)
+{
+	int i;
+	int ret;
+	struct ipa_quota_stats_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reading stats will reset them in hardware */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		if (IPA_CLIENT_IS_PROD(i) && ipa3_get_ep_mapping(i) != -1) {
+			ret = ipa_get_teth_stats(i, NULL);
+			if (ret) {
+				IPAERR("ipa_get_teth_stats failed %d\n", ret);
+				return ret;
+			}
+			/* a single iteration will reset all hardware stats */
+			break;
+		}
+	}
+
+	/* reset driver's cache */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		stats = &ipa3_ctx->hw_stats.teth.prod_stats[i];
+		memset(stats, 0, sizeof(*stats));
+	}
+
+	return 0;
+}
+
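+/*
+ * Mark a rule ID for FnR (filter and route) stats collection by
+ * setting its bit in the relevant rule_id_bitmask; every 32-bit word
+ * of the bitmask covers 32 consecutive rule IDs.
+ */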
+int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering,
+	u16 rule_id)
+{
+	int rule_idx, rule_bit;
+	u32 *bmsk_ptr;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	rule_idx = rule_id / 32;
+	rule_bit = rule_id % 32;
+
+	if (rule_idx >= IPAHAL_MAX_RULE_ID_32) {
+		IPAERR("invalid rule_id %d\n", rule_id);
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4 && filtering)
+		bmsk_ptr =
+			ipa3_ctx->hw_stats.flt_rt.flt_v4_init.rule_id_bitmask;
+	else if (ip == IPA_IP_v4)
+		bmsk_ptr =
+			ipa3_ctx->hw_stats.flt_rt.rt_v4_init.rule_id_bitmask;
+	else if (ip == IPA_IP_v6 && filtering)
+		bmsk_ptr =
+			ipa3_ctx->hw_stats.flt_rt.flt_v6_init.rule_id_bitmask;
+	else
+		bmsk_ptr =
+			ipa3_ctx->hw_stats.flt_rt.rt_v6_init.rule_id_bitmask;
+
+	bmsk_ptr[rule_idx] |= (1 << rule_bit);
+
+	return 0;
+}
+
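+/*
+ * Start FnR stats collection for the rule IDs added so far. The first
+ * and last non-empty bitmask words determine the START_ID and END_ID
+ * register values, and four immediate commands (start ID, end ID,
+ * stats base and SRAM init payload) are sent as one transaction.
+ */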
+int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	int smem_ofst, smem_size, stats_base, start_id_ofst, end_id_ofst;
+	int start_id, end_id;
+	struct ipahal_stats_init_flt_rt *init;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write flt_rt_base = {0};
+	struct ipahal_imm_cmd_pyld *flt_rt_base_pyld;
+	struct ipahal_imm_cmd_register_write flt_rt_start_id = {0};
+	struct ipahal_imm_cmd_pyld *flt_rt_start_id_pyld;
+	struct ipahal_imm_cmd_register_write flt_rt_end_id = { 0 };
+	struct ipahal_imm_cmd_pyld *flt_rt_end_id_pyld;
+	struct ipa3_desc desc[4] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip == IPA_IP_v4 && filtering) {
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
+		smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst);
+		smem_size = IPA_MEM_PART(stats_flt_v4_size);
+		stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_BASE);
+		start_id_ofst =
+			ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_START_ID);
+		end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV4_END_ID);
+	} else if (ip == IPA_IP_v4) {
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+		smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst);
+		smem_size = IPA_MEM_PART(stats_rt_v4_size);
+		stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_BASE);
+		start_id_ofst =
+			ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_START_ID);
+		end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV4_END_ID);
+	} else if (ip == IPA_IP_v6 && filtering) {
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
+		smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst);
+		smem_size = IPA_MEM_PART(stats_flt_v6_size);
+		stats_base = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_BASE);
+		start_id_ofst =
+			ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_START_ID);
+		end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_FILTER_IPV6_END_ID);
+	} else {
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+		smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst);
+		smem_size = IPA_MEM_PART(stats_rt_v6_size);
+		stats_base = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_BASE);
+		start_id_ofst =
+			ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_START_ID);
+		end_id_ofst = ipahal_get_reg_ofst(IPA_STAT_ROUTER_IPV6_END_ID);
+	}
+
+	for (start_id = 0; start_id < IPAHAL_MAX_RULE_ID_32; start_id++) {
+		if (init->rule_id_bitmask[start_id])
+			break;
+	}
+
+	if (start_id == IPAHAL_MAX_RULE_ID_32) {
+		IPAERR("empty rule ids\n");
+		return -EINVAL;
+	}
+
+	/* every rule_id_bitmask contains 32 rules */
+	start_id *= 32;
+
+	for (end_id = IPAHAL_MAX_RULE_ID_32 - 1; end_id >= 0; end_id--) {
+		if (init->rule_id_bitmask[end_id])
+			break;
+	}
+	end_id = (end_id + 1) * 32 - 1;
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_FNR, init,
+		false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	if (pyld->len > smem_size) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			smem_size, pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* the register writes and the stats pyld init are done atomically */
+	flt_rt_start_id.skip_pipeline_clear = false;
+	flt_rt_start_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	flt_rt_start_id.offset = start_id_ofst;
+	flt_rt_start_id.value = start_id;
+	flt_rt_start_id.value_mask = 0x3FF;
+	flt_rt_start_id_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_start_id, false);
+	if (!flt_rt_start_id_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto unmap;
+	}
+	desc[0].opcode = flt_rt_start_id_pyld->opcode;
+	desc[0].pyld = flt_rt_start_id_pyld->data;
+	desc[0].len = flt_rt_start_id_pyld->len;
+	desc[0].type = IPA_IMM_CMD_DESC;
+
+	flt_rt_end_id.skip_pipeline_clear = false;
+	flt_rt_end_id.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	flt_rt_end_id.offset = end_id_ofst;
+	flt_rt_end_id.value = end_id;
+	flt_rt_end_id.value_mask = 0x3FF;
+	flt_rt_end_id_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &flt_rt_end_id, false);
+	if (!flt_rt_end_id_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_flt_rt_start_id;
+	}
+	desc[1].opcode = flt_rt_end_id_pyld->opcode;
+	desc[1].pyld = flt_rt_end_id_pyld->data;
+	desc[1].len = flt_rt_end_id_pyld->len;
+	desc[1].type = IPA_IMM_CMD_DESC;
+
+	flt_rt_base.skip_pipeline_clear = false;
+	flt_rt_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	flt_rt_base.offset = stats_base;
+	flt_rt_base.value = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst;
+	flt_rt_base.value_mask = ~0;
+	flt_rt_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&flt_rt_base, false);
+	if (!flt_rt_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_flt_rt_end_id;
+	}
+	desc[2].opcode = flt_rt_base_pyld->opcode;
+	desc[2].pyld = flt_rt_base_pyld->data;
+	desc[2].len = flt_rt_base_pyld->len;
+	desc[2].type = IPA_IMM_CMD_DESC;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			smem_ofst;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_flt_rt_base;
+	}
+	desc[3].opcode = cmd_pyld->opcode;
+	desc[3].pyld = cmd_pyld->data;
+	desc[3].len = cmd_pyld->len;
+	desc[3].type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(4, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_flt_rt_base:
+	ipahal_destroy_imm_cmd(flt_rt_base_pyld);
+destroy_flt_rt_end_id:
+	ipahal_destroy_imm_cmd(flt_rt_end_id_pyld);
+destroy_flt_rt_start_id:
+	ipahal_destroy_imm_cmd(flt_rt_start_id_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
+
+int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering)
+{
+	struct ipahal_stats_init_flt_rt *init;
+	int i;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4 && filtering)
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
+	else if (ip == IPA_IP_v4)
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+	else if (ip == IPA_IP_v6 && filtering)
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
+	else
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++)
+		init->rule_id_bitmask[i] = 0;
+
+	return 0;
+}
+
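+/*
+ * Read the FnR stats of a single rule ID from SRAM. A NULL out
+ * parameter sets clear_after_read, so the call doubles as a reset of
+ * the hardware counters for that rule.
+ */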
+static int __ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering,
+	u16 rule_id, struct ipa_flt_rt_stats *out)
+{
+	int ret;
+	int smem_ofst;
+	bool clear = false;
+	struct ipahal_stats_get_offset_flt_rt *get_offset;
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc = { 0 };
+	struct ipahal_stats_flt_rt stats;
+
+	if (rule_id >= IPAHAL_MAX_RULE_ID_32 * 32) {
+		IPAERR("invalid rule_id %d\n", rule_id);
+		return -EINVAL;
+	}
+
+	if (out == NULL)
+		clear = true;
+
+	get_offset = kzalloc(sizeof(*get_offset), GFP_KERNEL);
+	if (!get_offset) {
+		IPADBG("no mem\n");
+		return -ENOMEM;
+	}
+
+	if (ip == IPA_IP_v4 && filtering) {
+		get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
+		smem_ofst = IPA_MEM_PART(stats_flt_v4_ofst);
+	} else if (ip == IPA_IP_v4) {
+		get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+		smem_ofst = IPA_MEM_PART(stats_rt_v4_ofst);
+	} else if (ip == IPA_IP_v6 && filtering) {
+		get_offset->init = ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
+		smem_ofst = IPA_MEM_PART(stats_flt_v6_ofst);
+	} else {
+		get_offset->init = ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+		smem_ofst = IPA_MEM_PART(stats_rt_v6_ofst);
+	}
+
+	get_offset->rule_id = rule_id;
+
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_FNR, get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		goto free_offset;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA memory\n");
+		ret = -ENOMEM;
+		goto free_offset;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = clear;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		smem_ofst + offset.offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(1, &desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_FNR,
+		&get_offset->init, mem.base, &stats);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	if (out) {
+		out->num_pkts = stats.num_packets;
+		out->num_pkts_hash = stats.num_packets_hash;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+free_offset:
+	kfree(get_offset);
+	return ret;
+
+}
+
+int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id,
+	struct ipa_flt_rt_stats *out)
+{
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	return __ipa_get_flt_rt_stats(ip, filtering, rule_id, out);
+}
+
+int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id)
+{
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	return __ipa_get_flt_rt_stats(ip, filtering, rule_id, NULL);
+}
+
+int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering)
+{
+	struct ipahal_stats_init_flt_rt *init;
+	int i;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	if (ip < 0 || ip >= IPA_IP_MAX) {
+		IPAERR("wrong ip type %d\n", ip);
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4 && filtering)
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
+	else if (ip == IPA_IP_v4)
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+	else if (ip == IPA_IP_v6 && filtering)
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
+	else
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) {
+		int idx = i / 32;
+		int bit = i % 32;
+
+		if (init->rule_id_bitmask[idx] & (1 << bit))
+			__ipa_get_flt_rt_stats(ip, filtering, i, NULL);
+	}
+
+	return 0;
+}
+
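+/*
+ * Program the drop stats region in IPA SRAM for the pipes set in
+ * pipe_bitmask, using the same atomic mask/base/payload sequence as
+ * the quota and tethering stats.
+ */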
+int ipa_init_drop_stats(u32 pipe_bitmask)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_register_write drop_base = {0};
+	struct ipahal_imm_cmd_pyld *drop_base_pyld;
+	struct ipahal_imm_cmd_register_write drop_mask = {0};
+	struct ipahal_imm_cmd_pyld *drop_mask_pyld;
+	struct ipa3_desc desc[3] = { {0} };
+	dma_addr_t dma_address;
+	int ret;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reset driver's cache */
+	memset(&ipa3_ctx->hw_stats.drop, 0, sizeof(ipa3_ctx->hw_stats.drop));
+	ipa3_ctx->hw_stats.drop.init.enabled_bitmask = pipe_bitmask;
+	IPADBG_LOW("pipe_bitmask=0x%x\n", pipe_bitmask);
+
+	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_DROP,
+		&ipa3_ctx->hw_stats.drop.init, false);
+	if (!pyld) {
+		IPAERR("failed to generate pyld\n");
+		return -EPERM;
+	}
+
+	if (pyld->len > IPA_MEM_PART(stats_drop_size)) {
+		IPAERR("SRAM partition too small: %d needed %d\n",
+			IPA_MEM_PART(stats_drop_size), pyld->len);
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	dma_address = dma_map_single(ipa3_ctx->pdev,
+		pyld->data,
+		pyld->len,
+		DMA_TO_DEVICE);
+	if (dma_mapping_error(ipa3_ctx->pdev, dma_address)) {
+		IPAERR("failed to DMA map\n");
+		ret = -EPERM;
+		goto destroy_init_pyld;
+	}
+
+	/* the register writes and the stats pyld init are done atomically */
+	drop_mask.skip_pipeline_clear = false;
+	drop_mask.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	drop_mask.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_MASK_n,
+		ipa3_ctx->ee);
+	drop_mask.value = pipe_bitmask;
+	drop_mask.value_mask = ~0;
+	drop_mask_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&drop_mask, false);
+	if (!drop_mask_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto unmap;
+	}
+	desc[0].opcode = drop_mask_pyld->opcode;
+	desc[0].pyld = drop_mask_pyld->data;
+	desc[0].len = drop_mask_pyld->len;
+	desc[0].type = IPA_IMM_CMD_DESC;
+
+	drop_base.skip_pipeline_clear = false;
+	drop_base.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	drop_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_DROP_CNT_BASE_n,
+		ipa3_ctx->ee);
+	drop_base.value = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_drop_ofst);
+	drop_base.value_mask = ~0;
+	drop_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&drop_base, false);
+	if (!drop_base_pyld) {
+		IPAERR("failed to construct register_write imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_drop_mask;
+	}
+	desc[1].opcode = drop_base_pyld->opcode;
+	desc[1].pyld = drop_base_pyld->data;
+	desc[1].len = drop_base_pyld->len;
+	desc[1].type = IPA_IMM_CMD_DESC;
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+	cmd.size = pyld->len;
+	cmd.system_addr = dma_address;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(stats_drop_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto destroy_drop_base;
+	}
+	desc[2].opcode = cmd_pyld->opcode;
+	desc[2].pyld = cmd_pyld->data;
+	desc[2].len = cmd_pyld->len;
+	desc[2].type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(3, desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	ret = 0;
+
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_drop_base:
+	ipahal_destroy_imm_cmd(drop_base_pyld);
+destroy_drop_mask:
+	ipahal_destroy_imm_cmd(drop_mask_pyld);
+unmap:
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, pyld->len, DMA_TO_DEVICE);
+destroy_init_pyld:
+	ipahal_destroy_stats_init_pyld(pyld);
+	return ret;
+}
+
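+/*
+ * Read the drop stats from SRAM with clear_after_read set, accumulate
+ * them into the driver cache and, when out is not NULL, copy the
+ * cached totals back to the caller.
+ */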
+int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
+{
+	int i;
+	int ret;
+	struct ipahal_stats_get_offset_drop get_offset = { { 0 } };
+	struct ipahal_stats_offset offset = { 0 };
+	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa_mem_buffer mem;
+	struct ipa3_desc desc = { 0 };
+	struct ipahal_stats_drop_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	get_offset.init = ipa3_ctx->hw_stats.drop.init;
+	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_DROP, &get_offset,
+		&offset);
+	if (ret) {
+		IPAERR("failed to get offset from hal %d\n", ret);
+		return ret;
+	}
+
+	IPADBG_LOW("offset = %d size = %d\n", offset.offset, offset.size);
+
+	mem.size = offset.size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+		mem.size,
+		&mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA memory\n");
+		return -ENOMEM;
+	}
+
+	cmd.is_read = true;
+	cmd.clear_after_read = true;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(stats_drop_ofst) + offset.offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		ret = -ENOMEM;
+		goto free_dma_mem;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	ret = ipa3_send_cmd(1, &desc);
+	if (ret) {
+		IPAERR("failed to send immediate command (error %d)\n", ret);
+		goto destroy_imm;
+	}
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats) {
+		IPADBG("failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto destroy_imm;
+	}
+
+	ret = ipahal_parse_stats(IPAHAL_HW_STATS_DROP,
+		&ipa3_ctx->hw_stats.drop.init, mem.base, stats);
+	if (ret) {
+		IPAERR("failed to parse stats (error %d)\n", ret);
+		goto free_stats;
+	}
+
+	/*
+	 * update driver cache.
+	 * the stats were read from hardware with clear_after_read meaning
+	 * hardware stats are 0 now
+	 */
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES)
+			continue;
+
+		if (ipa3_ctx->ep[ep_idx].client != i)
+			continue;
+
+		ipa3_ctx->hw_stats.drop.stats.client[i].drop_byte_cnt +=
+			stats->stats[ep_idx].drop_byte_cnt;
+		ipa3_ctx->hw_stats.drop.stats.client[i].drop_packet_cnt +=
+			stats->stats[ep_idx].drop_packet_cnt;
+	}
+
+	if (!out) {
+		ret = 0;
+		goto free_stats;
+	}
+
+	/* copy results to out parameter */
+	*out = ipa3_ctx->hw_stats.drop.stats;
+
+	ret = 0;
+free_stats:
+	kfree(stats);
+destroy_imm:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_dma_mem:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return ret;
+
+}
+
+int ipa_reset_drop_stats(enum ipa_client_type client)
+{
+	int ret;
+	struct ipa_drop_stats *stats;
+
+	if (client >= IPA_CLIENT_MAX) {
+		IPAERR("invalid client %d\n", client);
+		return -EINVAL;
+	}
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_drop_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_drop_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.drop.stats.client[client];
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+int ipa_reset_all_drop_stats(void)
+{
+	int ret;
+	struct ipa_drop_stats_all *stats;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	/* reading stats will reset them in hardware */
+	ret = ipa_get_drop_stats(NULL);
+	if (ret) {
+		IPAERR("ipa_get_drop_stats failed %d\n", ret);
+		return ret;
+	}
+
+	/* reset driver's cache */
+	stats = &ipa3_ctx->hw_stats.drop.stats;
+	memset(stats, 0, sizeof(*stats));
+	return 0;
+}
+
+
+#ifndef CONFIG_DEBUG_FS
+int ipa_debugfs_init_stats(struct dentry *parent) { return 0; }
+#else
+#define IPA_MAX_MSG_LEN 4096
+static char dbg_buff[IPA_MAX_MSG_LEN];
+
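+/*
+ * debugfs "quota" write handler: a client index resets that client's
+ * quota stats, -1 resets the quota stats of all clients.
+ */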
+static ssize_t ipa_debugfs_reset_quota_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (sizeof(dbg_buff) < count + 1) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &client)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	if (client == -1)
+		ipa_reset_all_quota_stats();
+	else
+		ipa_reset_quota_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+static ssize_t ipa_debugfs_print_quota_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_quota_stats_all *out;
+	int i;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	res = ipa_get_quota_stats(out);
+	if (res) {
+		mutex_unlock(&ipa3_ctx->lock);
+		kfree(out);
+		return res;
+	}
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.quota.init.enabled_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"%s:\n",
+			ipa_clients_strings[i]);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv4_bytes=%llu\n",
+			out->client[i].num_ipv4_bytes);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv6_bytes=%llu\n",
+			out->client[i].num_ipv6_bytes);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv4_pkts=%u\n",
+			out->client[i].num_ipv4_pkts);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"num_ipv6_pkts=%u\n",
+			out->client[i].num_ipv6_pkts);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_debugfs_reset_tethering_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (sizeof(dbg_buff) < count + 1) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &client)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	if (client == -1)
+		ipa_reset_all_teth_stats();
+	else
+		ipa_reset_all_cons_teth_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+static ssize_t ipa_debugfs_print_tethering_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_quota_stats_all *out;
+	int i, j;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (!IPA_CLIENT_IS_PROD(i))
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.teth.init.prod_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+		res = ipa_get_teth_stats(i, out);
+		if (res) {
+			mutex_unlock(&ipa3_ctx->lock);
+			kfree(out);
+			return res;
+		}
+
+		for (j = 0; j < IPA_CLIENT_MAX; j++) {
+			int cons_idx = ipa3_get_ep_mapping(j);
+
+			if (cons_idx == -1)
+				continue;
+
+			if (IPA_CLIENT_IS_TEST(j))
+				continue;
+
+			if (!(ipa3_ctx->hw_stats.teth.init.cons_bitmask[ep_idx]
+				& (1 << cons_idx)))
+				continue;
+
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"%s->%s:\n",
+				ipa_clients_strings[i],
+				ipa_clients_strings[j]);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv4_bytes=%llu\n",
+				out->client[j].num_ipv4_bytes);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv6_bytes=%llu\n",
+				out->client[j].num_ipv6_bytes);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv4_pkts=%u\n",
+				out->client[j].num_ipv4_pkts);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_ipv6_pkts=%u\n",
+				out->client[j].num_ipv6_pkts);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"\n");
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
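+/*
+ * debugfs write handler shared by the flt/rt nodes: "start", "clear"
+ * and "reset" map to ipa_flt_rt_stats_start(),
+ * ipa_flt_rt_stats_clear_rule_ids() and ipa_reset_all_flt_rt_stats();
+ * any other input is parsed as a rule ID and added to the bitmask.
+ */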
+static ssize_t ipa_debugfs_control_flt_rt_stats(enum ipa_ip_type ip,
+	bool filtering, struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	u16 rule_id = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (sizeof(dbg_buff) < count + 1) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (strcmp(dbg_buff, "start\n") == 0) {
+		ipa_flt_rt_stats_start(ip, filtering);
+	} else if (strcmp(dbg_buff, "clear\n") == 0) {
+		ipa_flt_rt_stats_clear_rule_ids(ip, filtering);
+	} else if (strcmp(dbg_buff, "reset\n") == 0) {
+		ipa_reset_all_flt_rt_stats(ip, filtering);
+	} else {
+		if (kstrtou16(dbg_buff, 0, &rule_id)) {
+			ret = -EFAULT;
+			goto bail;
+		}
+		ipa_flt_rt_stats_add_rule_id(ip, filtering, rule_id);
+	}
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+static ssize_t ipa_debugfs_print_flt_rt_stats(enum ipa_ip_type ip,
+	bool filtering, struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipahal_stats_init_flt_rt *init;
+	struct ipa_flt_rt_stats out;
+	int i;
+	int res;
+
+	if (ip == IPA_IP_v4 && filtering)
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v4_init;
+	else if (ip == IPA_IP_v4)
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v4_init;
+	else if (ip == IPA_IP_v6 && filtering)
+		init = &ipa3_ctx->hw_stats.flt_rt.flt_v6_init;
+	else
+		init = &ipa3_ctx->hw_stats.flt_rt.rt_v6_init;
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32 * 32; i++) {
+		int idx = i / 32;
+		int bit = i % 32;
+
+		if (init->rule_id_bitmask[idx] & (1 << bit)) {
+			res = ipa_get_flt_rt_stats(ip, filtering, i, &out);
+			if (res) {
+				mutex_unlock(&ipa3_ctx->lock);
+				return res;
+			}
+
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"rule_id: %d\n", i);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_pkts: %d\n",
+				out.num_pkts);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"num_pkts_hash: %d\n",
+				out.num_pkts_hash);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"\n");
+		}
+	}
+
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_debugfs_reset_drop_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 client = 0;
+	int ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (sizeof(dbg_buff) < count + 1) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &client)) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	if (client == -1)
+		ipa_reset_all_drop_stats();
+	else
+		ipa_reset_drop_stats(client);
+
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+static ssize_t ipa_debugfs_print_drop_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_drop_stats_all *out;
+	int i;
+	int res;
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	res = ipa_get_drop_stats(out);
+	if (res) {
+		mutex_unlock(&ipa3_ctx->lock);
+		kfree(out);
+		return res;
+	}
+
+	for (i = 0; i < IPA_CLIENT_MAX; i++) {
+		int ep_idx = ipa3_get_ep_mapping(i);
+
+		if (ep_idx == -1)
+			continue;
+
+		if (!IPA_CLIENT_IS_CONS(i))
+			continue;
+
+		if (IPA_CLIENT_IS_TEST(i))
+			continue;
+
+		if (!(ipa3_ctx->hw_stats.drop.init.enabled_bitmask &
+			(1 << ep_idx)))
+			continue;
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"%s:\n",
+			ipa_clients_strings[i]);
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"drop_byte_cnt=%u\n",
+			out->client[i].drop_byte_cnt);
+
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"drop_packet_cnt=%u\n",
+			out->client[i].drop_packet_cnt);
+		nbytes += scnprintf(dbg_buff + nbytes,
+			IPA_MAX_MSG_LEN - nbytes,
+			"\n");
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(out);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_debugfs_control_flt_v4_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, true, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_control_flt_v6_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, true, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_control_rt_v4_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v4, false, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_control_rt_v6_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_control_flt_rt_stats(IPA_IP_v6, false, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_print_flt_v4_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, true, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_print_flt_v6_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, true, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_print_rt_v4_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v4, false, file, ubuf,
+		count, ppos);
+}
+
+static ssize_t ipa_debugfs_print_rt_v6_stats(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	return ipa_debugfs_print_flt_rt_stats(IPA_IP_v6, false, file, ubuf,
+		count, ppos);
+}
+
+static const struct file_operations ipa3_quota_ops = {
+	.read = ipa_debugfs_print_quota_stats,
+	.write = ipa_debugfs_reset_quota_stats,
+};
+
+static const struct file_operations ipa3_tethering_ops = {
+	.read = ipa_debugfs_print_tethering_stats,
+	.write = ipa_debugfs_reset_tethering_stats,
+};
+
+static const struct file_operations ipa3_flt_v4_ops = {
+	.read = ipa_debugfs_print_flt_v4_stats,
+	.write = ipa_debugfs_control_flt_v4_stats,
+};
+
+static const struct file_operations ipa3_flt_v6_ops = {
+	.read = ipa_debugfs_print_flt_v6_stats,
+	.write = ipa_debugfs_control_flt_v6_stats,
+};
+
+static const struct file_operations ipa3_rt_v4_ops = {
+	.read = ipa_debugfs_print_rt_v4_stats,
+	.write = ipa_debugfs_control_rt_v4_stats,
+};
+
+static const struct file_operations ipa3_rt_v6_ops = {
+	.read = ipa_debugfs_print_rt_v6_stats,
+	.write = ipa_debugfs_control_rt_v6_stats,
+};
+
+static const struct file_operations ipa3_drop_ops = {
+	.read = ipa_debugfs_print_drop_stats,
+	.write = ipa_debugfs_reset_drop_stats,
+};
+
+
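+/*
+ * Create the hw_stats debugfs directory under parent with the quota,
+ * drop, tethering, flt_v4, flt_v6, rt_v4 and rt_v6 nodes.
+ */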
+int ipa_debugfs_init_stats(struct dentry *parent)
+{
+	const mode_t read_write_mode = 0664;
+	struct dentry *file;
+	struct dentry *dent;
+
+	if (!ipa3_ctx->hw_stats.enabled)
+		return 0;
+
+	dent = debugfs_create_dir("hw_stats", parent);
+	if (IS_ERR_OR_NULL(dent)) {
+		IPAERR("fail to create folder in debug_fs\n");
+		return -EFAULT;
+	}
+
+	file = debugfs_create_file("quota", read_write_mode, dent, NULL,
+		&ipa3_quota_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "quota");
+		goto fail;
+	}
+
+	file = debugfs_create_file("drop", read_write_mode, dent, NULL,
+		&ipa3_drop_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "drop");
+		goto fail;
+	}
+
+	file = debugfs_create_file("tethering", read_write_mode, dent, NULL,
+		&ipa3_tethering_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "tethering");
+		goto fail;
+	}
+
+	file = debugfs_create_file("flt_v4", read_write_mode, dent, NULL,
+		&ipa3_flt_v4_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "flt_v4");
+		goto fail;
+	}
+
+	file = debugfs_create_file("flt_v6", read_write_mode, dent, NULL,
+		&ipa3_flt_v6_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "flt_v6");
+		goto fail;
+	}
+
+	file = debugfs_create_file("rt_v4", read_write_mode, dent, NULL,
+		&ipa3_rt_v4_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "rt_v4");
+		goto fail;
+	}
+
+	file = debugfs_create_file("rt_v6", read_write_mode, dent, NULL,
+		&ipa3_rt_v6_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "rt_v6");
+		goto fail;
+	}
+
+	return 0;
+fail:
+	debugfs_remove_recursive(dent);
+	return -EFAULT;
+}
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 1bed1c8..96a022d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -33,6 +33,7 @@
 #include "ipahal/ipahal_reg.h"
 #include "ipahal/ipahal.h"
 #include "ipahal/ipahal_fltrt.h"
+#include "ipahal/ipahal_hw_stats.h"
 #include "../ipa_common_i.h"
 #include "ipa_uc_offload_i.h"
 
@@ -100,6 +101,18 @@
 		} \
 	} while (0)
 
+#define IPAERR_RL(fmt, args...) \
+	do { \
+		pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__,\
+		__LINE__, ## args);\
+		if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
 #define WLAN_AMPDU_TX_EP 15
 #define WLAN_PROD_TX_EP  19
 #define WLAN1_CONS_RX_EP  14
@@ -175,6 +188,7 @@
 };
 
 struct ipa3_active_clients_log_ctx {
+	spinlock_t lock;
 	char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
 	int log_head;
 	int log_tail;
@@ -238,7 +252,7 @@
  * @curr_mem: current routing tables block in sys memory
  * @prev_mem: previous routing table block in sys memory
  * @id: routing table id
- * @rule_ids: idr structure that holds the rule_id for each rule
+ * @rule_ids: common idr structure that holds the rule_id for each rule
  */
 struct ipa3_rt_tbl {
 	struct list_head link;
@@ -254,7 +268,7 @@
 	struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
 	struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
 	int id;
-	struct idr rule_ids;
+	struct idr *rule_ids;
 };
 
 /**
@@ -376,7 +390,7 @@
  * @end: the last header index
  * @curr_mem: current filter tables block in sys memory
  * @prev_mem: previous filter table block in sys memory
- * @rule_ids: idr structure that holds the rule_id for each rule
+ * @rule_ids: common idr structure that holds the rule_id for each rule
  */
 struct ipa3_flt_tbl {
 	struct list_head head_flt_rule_list;
@@ -386,7 +400,7 @@
 	struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
 	struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
 	bool sticky_rear;
-	struct idr rule_ids;
+	struct idr *rule_ids;
 };
 
 /**
@@ -420,10 +434,12 @@
  * struct ipa3_rt_tbl_set - collection of routing tables
  * @head_rt_tbl_list: collection of routing tables
  * @tbl_cnt: number of routing tables
+ * @rule_ids: idr structure that holds the rule_id for each rule
  */
 struct ipa3_rt_tbl_set {
 	struct list_head head_rt_tbl_list;
 	u32 tbl_cnt;
+	struct idr rule_ids;
 };
 
 /**
@@ -1022,6 +1038,56 @@
 	struct ipahal_imm_cmd_pyld *cmd_pyld;
 };
 
+struct ipa_quota_stats {
+	u64 num_ipv4_bytes;
+	u64 num_ipv6_bytes;
+	u32 num_ipv4_pkts;
+	u32 num_ipv6_pkts;
+};
+
+struct ipa_quota_stats_all {
+	struct ipa_quota_stats client[IPA_CLIENT_MAX];
+};
+
+struct ipa_drop_stats {
+	u32 drop_packet_cnt;
+	u32 drop_byte_cnt;
+};
+
+struct ipa_drop_stats_all {
+	struct ipa_drop_stats client[IPA_CLIENT_MAX];
+};
+
+struct ipa_hw_stats_quota {
+	struct ipahal_stats_init_quota init;
+	struct ipa_quota_stats_all stats;
+};
+
+struct ipa_hw_stats_teth {
+	struct ipahal_stats_init_tethering init;
+	struct ipa_quota_stats_all prod_stats[IPA_CLIENT_MAX];
+};
+
+struct ipa_hw_stats_flt_rt {
+	struct ipahal_stats_init_flt_rt flt_v4_init;
+	struct ipahal_stats_init_flt_rt flt_v6_init;
+	struct ipahal_stats_init_flt_rt rt_v4_init;
+	struct ipahal_stats_init_flt_rt rt_v6_init;
+};
+
+struct ipa_hw_stats_drop {
+	struct ipahal_stats_init_drop init;
+	struct ipa_drop_stats_all stats;
+};
+
+struct ipa_hw_stats {
+	bool enabled;
+	struct ipa_hw_stats_quota quota;
+	struct ipa_hw_stats_teth teth;
+	struct ipa_hw_stats_flt_rt flt_rt;
+	struct ipa_hw_stats_drop drop;
+};
+
 /**
  * struct ipa3_context - IPA context
  * @class: pointer to the struct class
@@ -1035,6 +1101,7 @@
  * @ep_flt_num: End-points supporting filtering number
  * @resume_on_connect: resume ep on ipa connect
  * @flt_tbl: list of all IPA filter tables
+ * @flt_rule_ids: idr structure that holds the rule_id for each rule
  * @mode: IPA operating mode
  * @mmio: iomem
  * @ipa_wrapper_base: IPA wrapper base address
@@ -1120,6 +1187,7 @@
 	u32 ep_flt_num;
 	bool resume_on_connect[IPA_CLIENT_MAX];
 	struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX];
+	struct idr flt_rule_ids[IPA_IP_MAX];
 	void __iomem *mmio;
 	u32 ipa_wrapper_base;
 	u32 ipa_wrapper_size;
@@ -1234,6 +1302,7 @@
 	u32 ipa_tz_unlock_reg_num;
 	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
 	struct ipa_dma_task_info dma_task_info;
+	struct ipa_hw_stats hw_stats;
 };
 
 struct ipa3_plat_drv_res {
@@ -1346,6 +1415,48 @@
  * +-------------------------+
  * |    CANARY               |
  * +-------------------------+
+ * | QUOTA STATS             |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | TETH STATS              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 FLT STATS            |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 FLT STATS            |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 RT STATS             |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 RT STATS             |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | DROP STATS              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
  * |  MODEM MEM              |
  * +-------------------------+
  * |    CANARY               |
@@ -1428,6 +1539,20 @@
 	u32 uc_event_ring_size;
 	u32 pdn_config_ofst;
 	u32 pdn_config_size;
+	u32 stats_quota_ofst;
+	u32 stats_quota_size;
+	u32 stats_tethering_ofst;
+	u32 stats_tethering_size;
+	u32 stats_flt_v4_ofst;
+	u32 stats_flt_v4_size;
+	u32 stats_flt_v6_ofst;
+	u32 stats_flt_v6_size;
+	u32 stats_rt_v4_ofst;
+	u32 stats_rt_v4_size;
+	u32 stats_rt_v6_ofst;
+	u32 stats_rt_v6_size;
+	u32 stats_drop_ofst;
+	u32 stats_drop_size;
 };
 
 struct ipa3_controller {
@@ -1875,10 +2000,6 @@
 				void *private_data,
 				void *interrupt_data);
 
-
-int ipa_bridge_init(void);
-void ipa_bridge_cleanup(void);
-
 ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
 		 loff_t *f_pos);
 int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
@@ -1978,6 +2099,65 @@
 	(enum ipa_client_type client);
 void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
 
+/* Hardware stats */
+
+#define IPA_STATS_MAX_PIPE_BIT 32
+
+struct ipa_teth_stats_endpoints {
+	u32 prod_mask;
+	u32 dst_ep_mask[IPA_STATS_MAX_PIPE_BIT];
+};
+
+struct ipa_flt_rt_stats {
+	u32 num_pkts;
+	u32 num_pkts_hash;
+};
+
+int ipa_hw_stats_init(void);
+
+int ipa_debugfs_init_stats(struct dentry *parent);
+
+int ipa_init_quota_stats(u32 pipe_bitmask);
+
+int ipa_get_quota_stats(struct ipa_quota_stats_all *out);
+
+int ipa_reset_quota_stats(enum ipa_client_type client);
+
+int ipa_reset_all_quota_stats(void);
+
+int ipa_init_drop_stats(u32 pipe_bitmask);
+
+int ipa_get_drop_stats(struct ipa_drop_stats_all *out);
+
+int ipa_reset_drop_stats(enum ipa_client_type client);
+
+int ipa_reset_all_drop_stats(void);
+
+int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in);
+
+int ipa_get_teth_stats(enum ipa_client_type prod,
+	struct ipa_quota_stats_all *out);
+
+int ipa_reset_teth_stats(enum ipa_client_type prod, enum ipa_client_type cons);
+
+int ipa_reset_all_cons_teth_stats(enum ipa_client_type prod);
+
+int ipa_reset_all_teth_stats(void);
+
+int ipa_flt_rt_stats_add_rule_id(enum ipa_ip_type ip, bool filtering,
+	u16 rule_id);
+
+int ipa_flt_rt_stats_start(enum ipa_ip_type ip, bool filtering);
+
+int ipa_flt_rt_stats_clear_rule_ids(enum ipa_ip_type ip, bool filtering);
+
+int ipa_get_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id,
+	struct ipa_flt_rt_stats *out);
+
+int ipa_reset_flt_rt_stats(enum ipa_ip_type ip, bool filtering, u16 rule_id);
+
+int ipa_reset_all_flt_rt_stats(enum ipa_ip_type ip, bool filtering);
+
 u32 ipa3_get_num_pipes(void);
 struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void);
 struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
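
The prototypes above form the driver-facing hardware-stats API. A minimal usage sketch for tethering counters follows; the choice of USB clients, the BIT() indexing of dst_ep_mask[] by producer pipe index, and the error handling are illustrative assumptions, not taken from this patch.

/* Sketch only: enable tethering stats for USB prod -> USB cons and read
 * them back. Assumes both pipes are already connected and that
 * dst_ep_mask[] is indexed by the producer pipe, mirroring the HAL's
 * cons_bitmask[].
 */
static int example_usb_teth_stats(struct ipa_quota_stats_all *out)
{
	struct ipa_teth_stats_endpoints in;
	int prod_pipe = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
	int cons_pipe = ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS);
	int ret;

	if (prod_pipe < 0 || cons_pipe < 0)
		return -ENODEV;

	memset(&in, 0, sizeof(in));
	in.prod_mask = BIT(prod_pipe);
	in.dst_ep_mask[prod_pipe] = BIT(cons_pipe);

	ret = ipa_init_teth_stats(&in);
	if (ret)
		return ret;

	/* later: fetch the accumulated per-consumer counters */
	return ipa_get_teth_stats(IPA_CLIENT_USB_PROD, out);
}
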
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 38e8d4e..2bd7b79 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -227,7 +227,7 @@
 
 	if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
 			IPA_RESOURCE_NAME_MAX) {
-		IPAERR("Interface name too long. (%s)\n", lookup->name);
+		IPAERR_RL("Interface name too long. (%s)\n", lookup->name);
 		return result;
 	}
 
@@ -268,7 +268,7 @@
 	}
 
 	if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
-		IPAERR("Interface name too long. (%s)\n", tx->name);
+		IPAERR_RL("Interface name too long. (%s)\n", tx->name);
 		return result;
 	}
 
@@ -315,7 +315,7 @@
 	}
 
 	if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
-		IPAERR("Interface name too long. (%s)\n", rx->name);
+		IPAERR_RL("Interface name too long. (%s)\n", rx->name);
 		return result;
 	}
 
@@ -410,13 +410,13 @@
 
 	if (meta == NULL || (buff == NULL && callback != NULL) ||
 	    (buff != NULL && callback == NULL)) {
-		IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+		IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
 		       meta, buff, callback);
 		return -EINVAL;
 	}
 
 	if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
-		IPAERR("unsupported message type %d\n", meta->msg_type);
+		IPAERR_RL("unsupported message type %d\n", meta->msg_type);
 		return -EINVAL;
 	}
 
@@ -640,7 +640,7 @@
 	int result = -EINVAL;
 
 	if (meta == NULL || buff == NULL || !count) {
-		IPAERR("invalid param name=%p buff=%p count=%zu\n",
+		IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
 				meta, buff, count);
 		return result;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index a153f2d..958fc6c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -253,8 +253,8 @@
 
 	mutex_lock(&nat_ctx->lock);
 	if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
-		IPAERR("Nat device name mismatch\n");
-		IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+		IPAERR_RL("Nat device name mismatch\n");
+		IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
 		result = -EPERM;
 		goto bail;
 	}
@@ -273,7 +273,7 @@
 
 	if (mem->size <= 0 ||
 			nat_ctx->is_dev_init == true) {
-		IPAERR("Invalid Parameters or device is already init\n");
+		IPAERR_RL("Invalid Parameters or device is already init\n");
 		result = -EPERM;
 		goto bail;
 	}
@@ -371,7 +371,7 @@
 	/* check for integer overflow */
 	if (init->ipv4_rules_offset >
 		UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
-		IPAERR("Detected overflow\n");
+		IPAERR_RL("Detected overflow\n");
 		return -EPERM;
 	}
 	/* Check Table Entry offset is not
@@ -380,8 +380,8 @@
 	tmp = init->ipv4_rules_offset +
 		(TBL_ENTRY_SIZE * (init->table_entries + 1));
 	if (tmp > ipa3_ctx->nat_mem.size) {
-		IPAERR("Table rules offset not valid\n");
-		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+		IPAERR_RL("Table rules offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n",
 			init->ipv4_rules_offset, (init->table_entries + 1),
 			tmp, ipa3_ctx->nat_mem.size);
 		return -EPERM;
@@ -389,8 +389,8 @@
 
 	/* check for integer overflow */
 	if (init->expn_rules_offset >
-		UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
-		IPAERR("Detected overflow\n");
+		(UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries))) {
+		IPAERR_RL("Detected overflow\n");
 		return -EPERM;
 	}
 	/* Check Expn Table Entry offset is not
@@ -399,8 +399,8 @@
 	tmp = init->expn_rules_offset +
 		(TBL_ENTRY_SIZE * init->expn_table_entries);
 	if (tmp > ipa3_ctx->nat_mem.size) {
-		IPAERR("Expn Table rules offset not valid\n");
-		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+		IPAERR_RL("Expn Table rules offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n",
 			init->expn_rules_offset, init->expn_table_entries,
 			tmp, ipa3_ctx->nat_mem.size);
 		return -EPERM;
@@ -409,7 +409,7 @@
 	/* check for integer overflow */
 	if (init->index_offset >
 		UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
-		IPAERR("Detected overflow\n");
+		IPAERR_RL("Detected overflow\n");
 		return -EPERM;
 	}
 	/* Check Indx Table Entry offset is not
@@ -418,8 +418,8 @@
 	tmp = init->index_offset +
 		(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
 	if (tmp > ipa3_ctx->nat_mem.size) {
-		IPAERR("Indx Table rules offset not valid\n");
-		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+		IPAERR_RL("Indx Table rules offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n",
 			init->index_offset, (init->table_entries + 1),
 			tmp, ipa3_ctx->nat_mem.size);
 		return -EPERM;
@@ -428,7 +428,7 @@
 	/* check for integer overflow */
 	if (init->index_expn_offset >
 		UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
-		IPAERR("Detected overflow\n");
+		IPAERR_RL("Detected overflow\n");
 		return -EPERM;
 	}
 	/* Check Expn Table entry offset is not
@@ -437,8 +437,8 @@
 	tmp = init->index_expn_offset +
 		(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
 	if (tmp > ipa3_ctx->nat_mem.size) {
-		IPAERR("Indx Expn Table rules offset not valid\n");
-		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+		IPAERR_RL("Indx Expn Table rules offset not valid\n");
+		IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n",
 			init->index_expn_offset, init->expn_table_entries,
 			tmp, ipa3_ctx->nat_mem.size);
 		return -EPERM;
@@ -476,16 +476,16 @@
 				(init->expn_rules_offset > offset) ||
 				(init->index_offset > offset) ||
 				(init->index_expn_offset > offset)) {
-			IPAERR("Failed due to integer overflow\n");
-			IPAERR("nat.mem.dma_handle: 0x%pa\n",
+			IPAERR_RL("Failed due to integer overflow\n");
+			IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
 				&ipa3_ctx->nat_mem.dma_handle);
-			IPAERR("ipv4_rules_offset: 0x%x\n",
+			IPAERR_RL("ipv4_rules_offset: 0x%x\n",
 				init->ipv4_rules_offset);
-			IPAERR("expn_rules_offset: 0x%x\n",
+			IPAERR_RL("expn_rules_offset: 0x%x\n",
 				init->expn_rules_offset);
-			IPAERR("index_offset: 0x%x\n",
+			IPAERR_RL("index_offset: 0x%x\n",
 				init->index_offset);
-			IPAERR("index_expn_offset: 0x%x\n",
+			IPAERR_RL("index_expn_offset: 0x%x\n",
 				init->index_expn_offset);
 			result = -EPERM;
 			goto destroy_imm_cmd;
@@ -544,7 +544,7 @@
 	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
 	if (!cmd_pyld[num_cmd]) {
-		IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+		IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
 		result = -EPERM;
 		goto destroy_imm_cmd;
 	}
@@ -747,7 +747,7 @@
 
 	IPADBG("\n");
 	if (dma->entries <= 0) {
-		IPAERR("Invalid number of commands %d\n",
+		IPAERR_RL("Invalid number of commands %d\n",
 			dma->entries);
 		ret = -EPERM;
 		goto bail;
@@ -755,7 +755,7 @@
 
 	for (cnt = 0; cnt < dma->entries; cnt++) {
 		if (dma->dma[cnt].table_index >= 1) {
-			IPAERR("Invalid table index %d\n",
+			IPAERR_RL("Invalid table index %d\n",
 				dma->dma[cnt].table_index);
 			ret = -EPERM;
 			goto bail;
@@ -766,7 +766,7 @@
 			if (dma->dma[cnt].offset >=
 				(ipa3_ctx->nat_mem.size_base_tables + 1) *
 				NAT_TABLE_ENTRY_SIZE_BYTE) {
-				IPAERR("Invalid offset %d\n",
+				IPAERR_RL("Invalid offset %d\n",
 					dma->dma[cnt].offset);
 				ret = -EPERM;
 				goto bail;
@@ -778,7 +778,7 @@
 			if (dma->dma[cnt].offset >=
 				ipa3_ctx->nat_mem.size_expansion_tables *
 				NAT_TABLE_ENTRY_SIZE_BYTE) {
-				IPAERR("Invalid offset %d\n",
+				IPAERR_RL("Invalid offset %d\n",
 					dma->dma[cnt].offset);
 				ret = -EPERM;
 				goto bail;
@@ -790,7 +790,7 @@
 			if (dma->dma[cnt].offset >=
 				(ipa3_ctx->nat_mem.size_base_tables + 1) *
 				NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
-				IPAERR("Invalid offset %d\n",
+				IPAERR_RL("Invalid offset %d\n",
 					dma->dma[cnt].offset);
 				ret = -EPERM;
 				goto bail;
@@ -802,7 +802,7 @@
 			if (dma->dma[cnt].offset >=
 				ipa3_ctx->nat_mem.size_expansion_tables *
 				NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
-				IPAERR("Invalid offset %d\n",
+				IPAERR_RL("Invalid offset %d\n",
 					dma->dma[cnt].offset);
 				ret = -EPERM;
 				goto bail;
@@ -811,7 +811,7 @@
 			break;
 
 		default:
-			IPAERR("Invalid base_addr %d\n",
+			IPAERR_RL("Invalid base_addr %d\n",
 				dma->dma[cnt].base_addr);
 			ret = -EPERM;
 			goto bail;
@@ -853,7 +853,7 @@
 		cmd.data = dma->dma[cnt].data;
 		cmd_pyld = ipahal_construct_imm_cmd(cmd_name, &cmd, false);
 		if (!cmd_pyld) {
-			IPAERR("Fail to construct nat_dma imm cmd\n");
+			IPAERR_RL("Fail to construct nat_dma imm cmd\n");
 			continue;
 		}
 		desc[1].type = IPA_IMM_CMD_DESC;
@@ -1016,7 +1016,7 @@
 	cmd_pyld = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
 	if (!cmd_pyld) {
-		IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+		IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
 		result = -EPERM;
 		goto destroy_regwrt_imm_cmd;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 234b945..d648331 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -694,7 +694,7 @@
 	struct ipa3_rt_tbl_set *set;
 
 	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
-		IPAERR("Name too long: %s\n", name);
+		IPAERR_RL("Name too long: %s\n", name);
 		return NULL;
 	}
 
@@ -720,7 +720,7 @@
 	struct ipa3_rt_tbl *entry;
 
 	if (in->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -746,7 +746,7 @@
 	int max_tbl_indx;
 
 	if (name == NULL) {
-		IPAERR("no tbl name\n");
+		IPAERR_RL("no tbl name\n");
 		goto error;
 	}
 
@@ -759,7 +759,7 @@
 			max(IPA_MEM_PART(v6_modem_rt_index_hi),
 			IPA_MEM_PART(v6_apps_rt_index_hi));
 	} else {
-		IPAERR("bad ip family type\n");
+		IPAERR_RL("bad ip family type\n");
 		goto error;
 	}
 
@@ -801,7 +801,7 @@
 			!ipa3_ctx->ip4_rt_tbl_nhash_lcl :
 			!ipa3_ctx->ip6_rt_tbl_nhash_lcl;
 		set->tbl_cnt++;
-		idr_init(&entry->rule_ids);
+		entry->rule_ids = &set->rule_ids;
 		list_add(&entry->link, &set->head_rt_tbl_list);
 
 		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
@@ -820,7 +820,7 @@
 ipa_insert_failed:
 	set->tbl_cnt--;
 	list_del(&entry->link);
-	idr_destroy(&entry->rule_ids);
+	idr_destroy(entry->rule_ids);
 fail_rt_idx_alloc:
 	entry->cookie = 0;
 	kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
@@ -835,12 +835,12 @@
 	struct ipa3_rt_tbl_set *rset;
 
 	if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
-		IPAERR("bad parms\n");
+		IPAERR_RL("bad parms\n");
 		return -EINVAL;
 	}
 	id = entry->id;
 	if (ipa3_id_find(id) == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		return -EPERM;
 	}
 
@@ -855,7 +855,7 @@
 
 	rset = &ipa3_ctx->reap_rt_tbl_set[ip];
 
-	idr_destroy(&entry->rule_ids);
+	entry->rule_ids = NULL;
 	if (entry->in_sys[IPA_RULE_HASHABLE] ||
 		entry->in_sys[IPA_RULE_NON_HASHABLE]) {
 		list_move(&entry->link, &rset->head_rt_tbl_list);
@@ -923,7 +923,7 @@
 	(*(entry))->tbl = tbl;
 	(*(entry))->hdr = hdr;
 	(*(entry))->proc_ctx = proc_ctx;
-	id = ipa3_alloc_rule_id(&tbl->rule_ids);
+	id = ipa3_alloc_rule_id(tbl->rule_ids);
 	if (id < 0) {
 		IPAERR("failed to allocate rule id\n");
 		WARN_ON(1);
@@ -967,7 +967,7 @@
 		entry->hdr->ref_cnt--;
 	else if (entry->proc_ctx)
 		entry->proc_ctx->ref_cnt--;
-	idr_remove(&tbl->rule_ids, entry->rule_id);
+	idr_remove(tbl->rule_ids, entry->rule_id);
 	list_del(&entry->link);
 	kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
 	return -EPERM;
@@ -987,7 +987,7 @@
 
 	tbl = __ipa_add_rt_tbl(ip, name);
 	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
-		IPAERR("failed adding rt tbl name = %s\n",
+		IPAERR_RL("failed adding rt tbl name = %s\n",
 			name ? name : "");
 		goto error;
 	}
@@ -997,8 +997,8 @@
 	 */
 	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
 	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
-		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
-		       tbl->rule_cnt, at_rear);
+		IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+			tbl->rule_cnt, at_rear);
 		goto error;
 	}
 
@@ -1068,7 +1068,7 @@
 	int ret;
 
 	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1078,7 +1078,7 @@
 					&rules->rules[i].rule,
 					rules->rules[i].at_rear,
 					&rules->rules[i].rt_rule_hdl)) {
-			IPAERR("failed to add rt rule %d\n", i);
+			IPAERR_RL("failed to add rt rule %d\n", i);
 			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
 		} else {
 			rules->rules[i].status = 0;
@@ -1114,7 +1114,7 @@
 	struct ipa3_rt_entry *entry = NULL;
 
 	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1122,28 +1122,28 @@
 
 	tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
 	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
-		IPAERR("failed finding rt tbl name = %s\n",
+		IPAERR_RL("failed finding rt tbl name = %s\n",
 			rules->rt_tbl_name ? rules->rt_tbl_name : "");
 		ret = -EINVAL;
 		goto bail;
 	}
 
 	if (tbl->rule_cnt <= 0) {
-		IPAERR("tbl->rule_cnt <= 0");
+		IPAERR_RL("tbl->rule_cnt <= 0");
 		ret = -EINVAL;
 		goto bail;
 	}
 
 	entry = ipa3_id_find(rules->add_after_hdl);
 	if (!entry) {
-		IPAERR("failed finding rule %d in rt tbls\n",
+		IPAERR_RL("failed finding rule %d in rt tbls\n",
 			rules->add_after_hdl);
 		ret = -EINVAL;
 		goto bail;
 	}
 
 	if (entry->tbl != tbl) {
-		IPAERR("given rt rule does not match the table\n");
+		IPAERR_RL("given rt rule does not match the table\n");
 		ret = -EINVAL;
 		goto bail;
 	}
@@ -1154,7 +1154,7 @@
 	 */
 	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
 			(&entry->link == tbl->head_rt_rule_list.prev)) {
-		IPAERR("cannot add rule at end of tbl rule_cnt=%d\n",
+		IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n",
 			tbl->rule_cnt);
 		ret = -EINVAL;
 		goto bail;
@@ -1171,7 +1171,7 @@
 					&rules->rules[i].rule,
 					&rules->rules[i].rt_rule_hdl,
 					&entry)) {
-			IPAERR("failed to add rt rule %d\n", i);
+			IPAERR_RL("failed to add rt rule %d\n", i);
 			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
 		} else {
 			rules->rules[i].status = 0;
@@ -1180,7 +1180,7 @@
 
 	if (rules->commit)
 		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
-			IPAERR("failed to commit\n");
+			IPAERR_RL("failed to commit\n");
 			ret = -EPERM;
 			goto bail;
 		}
@@ -1201,12 +1201,12 @@
 	entry = ipa3_id_find(rule_hdl);
 
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		return -EINVAL;
 	}
 
 	if (entry->cookie != IPA_RT_RULE_COOKIE) {
-		IPAERR("bad params\n");
+		IPAERR_RL("bad params\n");
 		return -EINVAL;
 	}
 
@@ -1219,10 +1219,10 @@
 	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u",
 		entry->tbl->idx, entry->tbl->rule_cnt,
 		entry->rule_id, entry->tbl->ref_cnt);
-	idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+	idr_remove(entry->tbl->rule_ids, entry->rule_id);
 	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
 		if (__ipa_del_rt_tbl(entry->tbl))
-			IPAERR("fail to del RT tbl\n");
+			IPAERR_RL("fail to del RT tbl\n");
 	}
 	entry->cookie = 0;
 	id = entry->id;
@@ -1249,14 +1249,14 @@
 	int ret;
 
 	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa3_ctx->lock);
 	for (i = 0; i < hdls->num_hdls; i++) {
 		if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
-			IPAERR("failed to del rt rule %i\n", i);
+			IPAERR_RL("failed to del rt rule %i\n", i);
 			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
 		} else {
 			hdls->hdl[i].status = 0;
@@ -1289,7 +1289,7 @@
 	int ret;
 
 	if (ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1333,7 +1333,7 @@
 	int id;
 
 	if (ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
@@ -1349,7 +1349,7 @@
 	 * filtering rules point to routing tables
 	 */
 	if (ipa3_reset_flt(ip))
-		IPAERR("fail to reset flt ip=%d\n", ip);
+		IPAERR_RL("fail to reset flt ip=%d\n", ip);
 
 	set = &ipa3_ctx->rt_tbl_set[ip];
 	rset = &ipa3_ctx->reap_rt_tbl_set[ip];
@@ -1378,7 +1378,7 @@
 			else if (rule->proc_ctx)
 				__ipa3_release_hdr_proc_ctx(rule->proc_ctx->id);
 			rule->cookie = 0;
-			idr_remove(&tbl->rule_ids, rule->rule_id);
+			idr_remove(tbl->rule_ids, rule->rule_id);
 			id = rule->id;
 			kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
 
@@ -1395,7 +1395,7 @@
 
 		/* do not remove the "default" routing tbl which has index 0 */
 		if (tbl->idx != apps_start_idx) {
-			idr_destroy(&tbl->rule_ids);
+			tbl->rule_ids = NULL;
 			if (tbl->in_sys[IPA_RULE_HASHABLE] ||
 				tbl->in_sys[IPA_RULE_NON_HASHABLE]) {
 				list_move(&tbl->link, &rset->head_rt_tbl_list);
@@ -1438,7 +1438,7 @@
 	int result = -EFAULT;
 
 	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 	mutex_lock(&ipa3_ctx->lock);
@@ -1449,7 +1449,7 @@
 
 		/* commit for get */
 		if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
-			IPAERR("fail to commit RT tbl\n");
+			IPAERR_RL("fail to commit RT tbl\n");
 
 		result = 0;
 	}
@@ -1475,13 +1475,13 @@
 	mutex_lock(&ipa3_ctx->lock);
 	entry = ipa3_id_find(rt_tbl_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		result = -EINVAL;
 		goto ret;
 	}
 
 	if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
-		IPAERR("bad parms\n");
+		IPAERR_RL("bad parms\n");
 		result = -EINVAL;
 		goto ret;
 	}
@@ -1501,10 +1501,10 @@
 		IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
 			entry->idx);
 		if (__ipa_del_rt_tbl(entry))
-			IPAERR("fail to del RT tbl\n");
+			IPAERR_RL("fail to del RT tbl\n");
 		/* commit for put */
 		if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
-			IPAERR("fail to commit RT tbl\n");
+			IPAERR_RL("fail to commit RT tbl\n");
 	}
 
 	result = 0;
@@ -1525,26 +1525,26 @@
 	if (rtrule->rule.hdr_hdl) {
 		hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
 		if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
-			IPAERR("rt rule does not point to valid hdr\n");
+			IPAERR_RL("rt rule does not point to valid hdr\n");
 			goto error;
 		}
 	} else if (rtrule->rule.hdr_proc_ctx_hdl) {
 		proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
 		if ((proc_ctx == NULL) ||
 			(proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
-			IPAERR("rt rule does not point to valid proc ctx\n");
+			IPAERR_RL("rt rule does not point to valid proc ctx\n");
 			goto error;
 		}
 	}
 
 	entry = ipa3_id_find(rtrule->rt_rule_hdl);
 	if (entry == NULL) {
-		IPAERR("lookup failed\n");
+		IPAERR_RL("lookup failed\n");
 		goto error;
 	}
 
 	if (entry->cookie != IPA_RT_RULE_COOKIE) {
-		IPAERR("bad params\n");
+		IPAERR_RL("bad params\n");
 		goto error;
 	}
 
@@ -1585,14 +1585,14 @@
 	int result;
 
 	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
-		IPAERR("bad parm\n");
+		IPAERR_RL("bad parm\n");
 		return -EINVAL;
 	}
 
 	mutex_lock(&ipa3_ctx->lock);
 	for (i = 0; i < hdls->num_rules; i++) {
 		if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
-			IPAERR("failed to mdfy rt rule %i\n", i);
+			IPAERR_RL("failed to mdfy rt rule %i\n", i);
 			hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
 		} else {
 			hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 60dc04f..c97d2b3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1668,7 +1668,7 @@
 
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
-		IPAERR("bad parm, %d\n", clnt_hdl);
+		IPAERR_RL("bad parm, %d\n", clnt_hdl);
 		return -EINVAL;
 	}
 
@@ -1681,7 +1681,7 @@
 	ep = &ipa3_ctx->ep[clnt_hdl];
 
 	if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
-		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
 		return -EFAULT;
 	}
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index a251359..e999896 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -21,6 +21,7 @@
 #include "ipa_i.h"
 #include "ipahal/ipahal.h"
 #include "ipahal/ipahal_fltrt.h"
+#include "ipahal/ipahal_hw_stats.h"
 #include "../ipa_rm_i.h"
 
 #define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
@@ -1141,7 +1142,7 @@
 	[IPA_4_0][IPA_CLIENT_ODU_PROD]            = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
-			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_ETHERNET_PROD]	  = {
@@ -1988,7 +1989,7 @@
  */
 int ipa3_cfg_filter(u32 disable)
 {
-	IPAERR("Filter disable is not supported!\n");
+	IPAERR_RL("Filter disable is not supported!\n");
 	return -EPERM;
 }
 
@@ -2123,7 +2124,7 @@
 	int ipa_ep_idx;
 
 	if (client >= IPA_CLIENT_MAX || client < 0) {
-		IPAERR("Bad client number! client =%d\n", client);
+		IPAERR_RL("Bad client number! client =%d\n", client);
 		return IPA_EP_NOT_ALLOCATED;
 	}
 
@@ -3281,19 +3282,19 @@
 	int result = -EINVAL;
 
 	if (param_in->client  >= IPA_CLIENT_MAX) {
-		IPAERR("bad parm client:%d\n", param_in->client);
+		IPAERR_RL("bad parm client:%d\n", param_in->client);
 		goto fail;
 	}
 
 	ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
 	if (ipa_ep_idx == -1) {
-		IPAERR("Invalid client.\n");
+		IPAERR_RL("Invalid client.\n");
 		goto fail;
 	}
 
 	ep = &ipa3_ctx->ep[ipa_ep_idx];
 	if (!ep->valid) {
-		IPAERR("EP not allocated.\n");
+		IPAERR_RL("EP not allocated.\n");
 		goto fail;
 	}
 
@@ -3307,7 +3308,7 @@
 		ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
 		result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
 		if (result)
-			IPAERR("qmap_id %d write failed on ep=%d\n",
+			IPAERR_RL("qmap_id %d write failed on ep=%d\n",
 					meta.qmap_id, ipa_ep_idx);
 		result = 0;
 	}
@@ -4455,6 +4456,7 @@
 	api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
 	api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
 	api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
+	api_ctrl->ipa_start_gsi_channel = ipa3_start_gsi_channel;
 	api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
 	api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
 	api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
index b945eb06..67e491b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -1,3 +1,3 @@
 obj-$(CONFIG_IPA3) += ipa_hal.o
 
-ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o ipahal_hw_stats.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 57d44e3..c4b1f35 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -15,6 +15,8 @@
 #include "ipahal_i.h"
 #include "ipahal_reg_i.h"
 #include "ipahal_fltrt_i.h"
+#include "ipahal_hw_stats_i.h"
+
 
 struct ipahal_context *ipahal_ctx;
 
@@ -48,9 +50,6 @@
 	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT),
 };
 
-#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
-		(kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
-
 static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
 
 
@@ -1504,6 +1503,12 @@
 		goto bail_free_ctx;
 	}
 
+	if (ipahal_hw_stats_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal hw stats\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
 	ipahal_debugfs_init();
 
 	return 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c
new file mode 100644
index 0000000..c711ff4
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.c
@@ -0,0 +1,557 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipahal_hw_stats.h"
+#include "ipahal_hw_stats_i.h"
+#include "ipahal_i.h"
+
+struct ipahal_hw_stats_obj {
+	struct ipahal_stats_init_pyld *(*generate_init_pyld)(void *params,
+		bool is_atomic_ctx);
+	int (*get_offset)(void *params, struct ipahal_stats_offset *out);
+	int (*parse_stats)(void *init_params, void *raw_stats,
+		void *parsed_stats);
+};
+
+static int _count_ones(u32 number)
+{
+	int count = 0;
+
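+	/* Kernighan's method: each pass clears the lowest set bit */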
+	while (number) {
+		count++;
+		number = number & (number - 1);
+	}
+
+	return count;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_quota(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_stats_init_quota *in =
+		(struct ipahal_stats_init_quota *)params;
+	int entries = _count_ones(in->enabled_bitmask);
+
+	IPAHAL_DBG_LOW("entries = %d\n", entries);
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		entries * sizeof(struct ipahal_stats_quota_hw), is_atomic_ctx);
+	if (!pyld) {
+		IPAHAL_ERR("no mem\n");
+		return NULL;
+	}
+
+	pyld->len = entries * sizeof(struct ipahal_stats_quota_hw);
+	return pyld;
+}
+
+static int ipahal_get_offset_quota(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_quota *in =
+		(struct ipahal_stats_get_offset_quota *)params;
+	int entries = _count_ones(in->init.enabled_bitmask);
+
+	IPAHAL_DBG_LOW("\n");
+	out->offset = 0;
+	out->size = entries * sizeof(struct ipahal_stats_quota_hw);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_quota(void *init_params, void *raw_stats,
+	void *parsed_stats)
+{
+	struct ipahal_stats_init_quota *init =
+		(struct ipahal_stats_init_quota *)init_params;
+	struct ipahal_stats_quota_hw *raw_hw =
+		(struct ipahal_stats_quota_hw *)raw_stats;
+	struct ipahal_stats_quota_all *out =
+		(struct ipahal_stats_quota_all *)parsed_stats;
+	int stat_idx = 0;
+	int i;
+
+	memset(out, 0, sizeof(*out));
+	IPAHAL_DBG_LOW("\n");
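+	/* raw stats are packed in bitmask order; scatter to per-pipe slots */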
+	for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+		if (init->enabled_bitmask & (1 << i)) {
+			IPAHAL_DBG_LOW("pipe %d stat_idx %d\n", i, stat_idx);
+			out->stats[i].num_ipv4_bytes =
+				raw_hw[stat_idx].num_ipv4_bytes;
+			out->stats[i].num_ipv4_pkts =
+				raw_hw[stat_idx].num_ipv4_pkts;
+			out->stats[i].num_ipv6_pkts =
+				raw_hw[stat_idx].num_ipv6_pkts;
+			out->stats[i].num_ipv6_bytes =
+				raw_hw[stat_idx].num_ipv6_bytes;
+			stat_idx++;
+		}
+	}
+
+	return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_tethering(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_stats_init_tethering *in =
+		(struct ipahal_stats_init_tethering *)params;
+	int hdr_entries = _count_ones(in->prod_bitmask);
+	int entries = 0;
+	int i;
+	void *pyld_ptr;
+	u32 incremental_offset;
+
+	IPAHAL_DBG_LOW("prod entries = %d\n", hdr_entries);
+	for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) {
+		if (in->prod_bitmask & (1 << i)) {
+			if (in->cons_bitmask[i] == 0) {
+				IPAHAL_ERR("no cons bitmask for prod %d\n", i);
+				return NULL;
+			}
+			entries += _count_ones(in->cons_bitmask[i]);
+		}
+	}
+	IPAHAL_DBG_LOW("sum all entries = %d\n", entries);
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) +
+		entries * sizeof(struct ipahal_stats_tethering_hw),
+		is_atomic_ctx);
+	if (!pyld) {
+		IPAHAL_ERR("no mem\n");
+		return NULL;
+	}
+
+	pyld->len = hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) +
+		entries * sizeof(struct ipahal_stats_tethering_hw);
+
+	pyld_ptr = pyld->data;
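+	/* header offsets below are kept in 8-byte units (hence the /8) */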
+	incremental_offset =
+		(hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw))
+			/ 8;
+	for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) {
+		if (in->prod_bitmask & (1 << i)) {
+			struct ipahal_stats_tethering_hdr_hw *hdr = pyld_ptr;
+
+			hdr->dst_mask = in->cons_bitmask[i];
+			hdr->offset = incremental_offset;
+			IPAHAL_DBG_LOW("hdr->dst_mask=0x%x\n", hdr->dst_mask);
+			IPAHAL_DBG_LOW("hdr->offset=0x%x\n", hdr->offset);
+			/* add the stats entry */
+			incremental_offset += _count_ones(in->cons_bitmask[i]) *
+				sizeof(struct ipahal_stats_tethering_hw) / 8;
+			pyld_ptr += sizeof(*hdr);
+		}
+	}
+
+	return pyld;
+}
+
+static int ipahal_get_offset_tethering(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_tethering *in =
+		(struct ipahal_stats_get_offset_tethering *)params;
+	int entries = 0;
+	int i;
+
+	for (i = 0; i < sizeof(in->init.prod_bitmask) * 8; i++) {
+		if (in->init.prod_bitmask & (1 << i)) {
+			if (in->init.cons_bitmask[i] == 0) {
+				IPAHAL_ERR("no cons bitmask for prod %d\n", i);
+				return -EPERM;
+			}
+			entries += _count_ones(in->init.cons_bitmask[i]);
+		}
+	}
+	IPAHAL_DBG_LOW("sum all entries = %d\n", entries);
+
+	/* skip the header */
+	out->offset = _count_ones(in->init.prod_bitmask) *
+		sizeof(struct ipahal_stats_tethering_hdr_hw);
+	out->size = entries * sizeof(struct ipahal_stats_tethering_hw);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_tethering(void *init_params, void *raw_stats,
+	void *parsed_stats)
+{
+	struct ipahal_stats_init_tethering *init =
+		(struct ipahal_stats_init_tethering *)init_params;
+	struct ipahal_stats_tethering_hw *raw_hw =
+		(struct ipahal_stats_tethering_hw *)raw_stats;
+	struct ipahal_stats_tethering_all *out =
+		(struct ipahal_stats_tethering_all *)parsed_stats;
+	int i, j;
+	int stat_idx = 0;
+
+	memset(out, 0, sizeof(*out));
+	IPAHAL_DBG_LOW("\n");
+	for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+		for (j = 0; j < IPAHAL_MAX_PIPES; j++) {
+			if ((init->prod_bitmask & (1 << i)) &&
+			    init->cons_bitmask[i] & (1 << j)) {
+				IPAHAL_DBG_LOW("prod %d cons %d\n", i, j);
+				IPAHAL_DBG_LOW("stat_idx %d\n", stat_idx);
+				out->stats[i][j].num_ipv4_bytes =
+					raw_hw[stat_idx].num_ipv4_bytes;
+				IPAHAL_DBG_LOW("num_ipv4_bytes %lld\n",
+					out->stats[i][j].num_ipv4_bytes);
+				out->stats[i][j].num_ipv4_pkts =
+					raw_hw[stat_idx].num_ipv4_pkts;
+				IPAHAL_DBG_LOW("num_ipv4_pkts %lld\n",
+					out->stats[i][j].num_ipv4_pkts);
+				out->stats[i][j].num_ipv6_pkts =
+					raw_hw[stat_idx].num_ipv6_pkts;
+				IPAHAL_DBG_LOW("num_ipv6_pkts %lld\n",
+					out->stats[i][j].num_ipv6_pkts);
+				out->stats[i][j].num_ipv6_bytes =
+					raw_hw[stat_idx].num_ipv6_bytes;
+				IPAHAL_DBG_LOW("num_ipv6_bytes %lld\n",
+					out->stats[i][j].num_ipv6_bytes);
+				stat_idx++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_stats_init_flt_rt *in =
+		(struct ipahal_stats_init_flt_rt *)params;
+	int hdr_entries;
+	int num_rules = 0;
+	int i, start_entry;
+	void *pyld_ptr;
+	u32 incremental_offset;
+
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++)
+		num_rules += _count_ones(in->rule_id_bitmask[i]);
+
+	if (num_rules == 0) {
+		IPAHAL_ERR("no rule ids provided\n");
+		return NULL;
+	}
+	IPAHAL_DBG_LOW("num_rules = %d\n", num_rules);
+
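+	/* header covers only the first through last non-zero bitmask words */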
+	hdr_entries = IPAHAL_MAX_RULE_ID_32;
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) {
+		if (in->rule_id_bitmask[i] != 0)
+			break;
+		hdr_entries--;
+	}
+	start_entry = i;
+
+	for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= start_entry; i--) {
+		if (in->rule_id_bitmask[i] != 0)
+			break;
+		hdr_entries--;
+	}
+	IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries);
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) +
+		num_rules * sizeof(struct ipahal_stats_flt_rt_hw),
+		is_atomic_ctx);
+	if (!pyld) {
+		IPAHAL_ERR("no mem\n");
+		return NULL;
+	}
+
+	pyld->len = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) +
+		num_rules * sizeof(struct ipahal_stats_flt_rt_hw);
+
+	pyld_ptr = pyld->data;
+	incremental_offset =
+		(hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw))
+			/ 8;
+	for (i = start_entry; i < hdr_entries; i++) {
+		struct ipahal_stats_flt_rt_hdr_hw *hdr = pyld_ptr;
+
+		hdr->en_mask = in->rule_id_bitmask[i];
+		hdr->cnt_offset = incremental_offset;
+		/* add the stats entry */
+		incremental_offset += _count_ones(in->rule_id_bitmask[i]) *
+			sizeof(struct ipahal_stats_flt_rt_hw) / 8;
+		pyld_ptr += sizeof(*hdr);
+	}
+
+	return pyld;
+}
+
+static int ipahal_get_offset_flt_rt(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_flt_rt *in =
+		(struct ipahal_stats_get_offset_flt_rt *)params;
+	int i;
+	int hdr_entries;
+	int skip_rules = 0;
+	int start_entry;
+	int rule_bit = in->rule_id % 32;
+	int rule_idx = in->rule_id / 32;
+
+	if (rule_idx >= IPAHAL_MAX_RULE_ID_32) {
+		IPAHAL_ERR("invalid rule_id %d\n", in->rule_id);
+		return -EPERM;
+	}
+
+	hdr_entries = IPAHAL_MAX_RULE_ID_32;
+	for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) {
+		if (in->init.rule_id_bitmask[i] != 0)
+			break;
+		hdr_entries--;
+	}
+
+	if (hdr_entries == 0) {
+		IPAHAL_ERR("no rule ids provided\n");
+		return -EPERM;
+	}
+	start_entry = i;
+
+	for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= 0; i--) {
+		if (in->init.rule_id_bitmask[i] != 0)
+			break;
+		hdr_entries--;
+	}
+	IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries);
+
+	/* skip the header */
+	out->offset = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw);
+
+	/* skip the previous rules  */
+	for (i = start_entry; i < rule_idx; i++)
+		skip_rules += _count_ones(in->init.rule_id_bitmask[i]);
+
+	for (i = 0; i < rule_bit; i++)
+		if (in->init.rule_id_bitmask[rule_idx] & (1 << i))
+			skip_rules++;
+
+	out->offset += skip_rules * sizeof(struct ipahal_stats_flt_rt_hw);
+	out->size = sizeof(struct ipahal_stats_flt_rt_hw);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_flt_rt(void *init_params, void *raw_stats,
+	void *parsed_stats)
+{
+	struct ipahal_stats_flt_rt_hw *raw_hw =
+		(struct ipahal_stats_flt_rt_hw *)raw_stats;
+	struct ipahal_stats_flt_rt *out =
+		(struct ipahal_stats_flt_rt *)parsed_stats;
+
+	memset(out, 0, sizeof(*out));
+	IPAHAL_DBG_LOW("\n");
+	out->num_packets = raw_hw->num_packets;
+	out->num_packets_hash = raw_hw->num_packets_hash;
+
+	return 0;
+}
+
+static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_drop(
+	void *params, bool is_atomic_ctx)
+{
+	struct ipahal_stats_init_pyld *pyld;
+	struct ipahal_stats_init_drop *in =
+		(struct ipahal_stats_init_drop *)params;
+	int entries = _count_ones(in->enabled_bitmask);
+
+	IPAHAL_DBG_LOW("entries = %d\n", entries);
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
+		entries * sizeof(struct ipahal_stats_drop_hw), is_atomic_ctx);
+	if (!pyld) {
+		IPAHAL_ERR("no mem\n");
+		return NULL;
+	}
+
+	pyld->len = entries * sizeof(struct ipahal_stats_drop_hw);
+
+	return pyld;
+}
+
+static int ipahal_get_offset_drop(void *params,
+	struct ipahal_stats_offset *out)
+{
+	struct ipahal_stats_get_offset_drop *in =
+		(struct ipahal_stats_get_offset_drop *)params;
+	int entries = _count_ones(in->init.enabled_bitmask);
+
+	IPAHAL_DBG_LOW("\n");
+	out->offset = 0;
+	out->size = entries * sizeof(struct ipahal_stats_drop_hw);
+
+	return 0;
+}
+
+static int ipahal_parse_stats_drop(void *init_params, void *raw_stats,
+	void *parsed_stats)
+{
+	struct ipahal_stats_init_drop *init =
+		(struct ipahal_stats_init_drop *)init_params;
+	struct ipahal_stats_drop_hw *raw_hw =
+		(struct ipahal_stats_drop_hw *)raw_stats;
+	struct ipahal_stats_drop_all *out =
+		(struct ipahal_stats_drop_all *)parsed_stats;
+	int stat_idx = 0;
+	int i;
+
+	memset(out, 0, sizeof(*out));
+	IPAHAL_DBG_LOW("\n");
+	for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
+		if (init->enabled_bitmask & (1 << i)) {
+			out->stats[i].drop_byte_cnt =
+				raw_hw[stat_idx].drop_byte_cnt;
+			out->stats[i].drop_packet_cnt =
+				raw_hw[stat_idx].drop_packet_cnt;
+			stat_idx++;
+		}
+	}
+
+	return 0;
+}
+
+static struct ipahal_hw_stats_obj
+	ipahal_hw_stats_objs[IPA_HW_MAX][IPAHAL_HW_STATS_MAX] = {
+	/* IPAv4 */
+	[IPA_HW_v4_0][IPAHAL_HW_STATS_QUOTA] = {
+		ipahal_generate_init_pyld_quota,
+		ipahal_get_offset_quota,
+		ipahal_parse_stats_quota
+	},
+	[IPA_HW_v4_0][IPAHAL_HW_STATS_TETHERING] = {
+		ipahal_generate_init_pyld_tethering,
+		ipahal_get_offset_tethering,
+		ipahal_parse_stats_tethering
+	},
+	[IPA_HW_v4_0][IPAHAL_HW_STATS_FNR] = {
+		ipahal_generate_init_pyld_flt_rt,
+		ipahal_get_offset_flt_rt,
+		ipahal_parse_stats_flt_rt
+	},
+	[IPA_HW_v4_0][IPAHAL_HW_STATS_DROP] = {
+		ipahal_generate_init_pyld_drop,
+		ipahal_get_offset_drop,
+		ipahal_parse_stats_drop
+	},
+};
+
+int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_hw_stats_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
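+	/* inherit handlers from the previous HW version unless overridden */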
+	for (i = IPA_HW_v4_0 ; i < ipa_hw_type ; i++) {
+		for (j = 0; j < IPAHAL_HW_STATS_MAX; j++) {
+			if (!memcmp(&ipahal_hw_stats_objs[i + 1][j], &zero_obj,
+				sizeof(struct ipahal_hw_stats_obj))) {
+				memcpy(&ipahal_hw_stats_objs[i + 1][j],
+					&ipahal_hw_stats_objs[i][j],
+					sizeof(struct ipahal_hw_stats_obj));
+			} else {
+				/*
+				 * explicitly overridden stat.
+				 * Check validity
+				 */
+				if (!ipahal_hw_stats_objs[i + 1][j].
+					get_offset) {
+					IPAHAL_ERR(
+					  "stat=%d get_offset null ver=%d\n",
+					  j, i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_hw_stats_objs[i + 1][j].
+				    parse_stats) {
+					IPAHAL_ERR(
+					  "stat=%d parse_stats null ver=%d\n",
+						j, i + 1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params,
+	struct ipahal_stats_offset *out)
+{
+	if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
+		IPAHAL_ERR("Invalid type stat=%d\n", type);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (!params || !out) {
+		IPAHAL_ERR("Null arg\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].get_offset(
+		params, out);
+}
+
+struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
+	enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx)
+{
+	if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
+		IPAHAL_ERR("Invalid type stat=%d\n", type);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (!params) {
+		IPAHAL_ERR("Null arg\n");
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].
+		generate_init_pyld(params, is_atomic_ctx);
+}
+
+int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
+	void *raw_stats, void *parsed_stats)
+{
+	if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
+		IPAHAL_ERR("Invalid type stat=%d\n", type);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (!raw_stats || !parsed_stats) {
+		IPAHAL_ERR("Null arg\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].parse_stats(
+		init_params, raw_stats, parsed_stats);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h
new file mode 100644
index 0000000..cbb1dc3
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h
@@ -0,0 +1,248 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_HW_STATS_H_
+#define _IPAHAL_HW_STATS_H_
+
+#include <linux/ipa.h>
+
+#define IPAHAL_MAX_PIPES 32
+#define IPAHAL_MAX_RULE_ID_32 (1024 / 32) /* 10 bits of rule id */
+
+enum ipahal_hw_stats_type {
+	IPAHAL_HW_STATS_QUOTA,
+	IPAHAL_HW_STATS_TETHERING,
+	IPAHAL_HW_STATS_FNR,
+	IPAHAL_HW_STATS_DROP,
+	IPAHAL_HW_STATS_MAX
+};
+
+/*
+ * struct ipahal_stats_init_pyld - Statistics initialization payload
+ * @len: length of payload
+ * @data: actual payload data
+ */
+struct ipahal_stats_init_pyld {
+	u16 len;
+	u16 reserved;
+	u8 data[0];
+};
+
+/*
+ * struct ipahal_stats_offset - Statistics offset parameters
+ * @offset: offset of the statistic from the beginning of the stats table
+ * @size: size of the statistics
+ */
+struct ipahal_stats_offset {
+	u32 offset;
+	u16 size;
+};
+
+/*
+ * struct ipahal_stats_init_quota - Initialization parameters for quota
+ * @enabled_bitmask: bit mask of pipes to be monitored
+ */
+struct ipahal_stats_init_quota {
+	u32 enabled_bitmask;
+};
+
+/*
+ * struct ipahal_stats_get_offset_quota - Get offset parameters for quota
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_quota {
+	struct ipahal_stats_init_quota init;
+};
+
+/*
+ * struct ipahal_stats_quota - Quota statistics
+ * @num_ipv4_bytes: IPv4 bytes
+ * @num_ipv6_bytes: IPv6 bytes
+ * @num_ipv4_pkts: IPv4 packets
+ * @num_ipv6_pkts: IPv6 packets
+ */
+struct ipahal_stats_quota {
+	u64 num_ipv4_bytes;
+	u64 num_ipv6_bytes;
+	u64 num_ipv4_pkts;
+	u64 num_ipv6_pkts;
+};
+
+/*
+ * struct ipahal_stats_quota_all - Quota statistics for all pipes
+ * @stats: array of statistics per pipe
+ */
+struct ipahal_stats_quota_all {
+	struct ipahal_stats_quota stats[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_init_tethering - Initialization parameters for tethering
+ * @prod_bitmask: bit mask of producer pipes to be monitored
+ * @cons_bitmask: bit mask of consumer pipes to be monitored per producer
+ */
+struct ipahal_stats_init_tethering {
+	u32 prod_bitmask;
+	u32 cons_bitmask[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_get_offset_tethering - Get offset parameters for
+ *	tethering
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_tethering {
+	struct ipahal_stats_init_tethering init;
+};
+
+/*
+ * struct ipahal_stats_tethering - Tethering statistics
+ * @num_ipv4_bytes: IPv4 bytes
+ * @num_ipv6_bytes: IPv6 bytes
+ * @num_ipv4_pkts: IPv4 packets
+ * @num_ipv6_pkts: IPv6 packets
+ */
+struct ipahal_stats_tethering {
+	u64 num_ipv4_bytes;
+	u64 num_ipv6_bytes;
+	u64 num_ipv4_pkts;
+	u64 num_ipv6_pkts;
+};
+
+/*
+ * struct ipahal_stats_tethering_all - Tethering statistics for all pipes
+ * @stats: matrix of statistics per pair of pipes
+ */
+struct ipahal_stats_tethering_all {
+	struct ipahal_stats_tethering
+		stats[IPAHAL_MAX_PIPES][IPAHAL_MAX_PIPES];
+};
+
+/*
+ * struct ipahal_stats_init_flt_rt - Initialization parameters for flt_rt
+ * @rule_id_bitmask: array describes which rule ids to monitor.
+ *	rule_id bit is determined by:
+ *		index to the array => rule_id / 32
+ *		bit to enable => rule_id % 32
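+ *	e.g. rule_id 70 => rule_id_bitmask[2], bit 6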
+ */
+struct ipahal_stats_init_flt_rt {
+	u32 rule_id_bitmask[IPAHAL_MAX_RULE_ID_32];
+};
+
+/*
+ * struct ipahal_stats_get_offset_flt_rt - Get offset parameters for flt_rt
+ * @init: initialization parameters used in initialization of stats
+ * @rule_id: rule_id to get the offset for
+ */
+struct ipahal_stats_get_offset_flt_rt {
+	struct ipahal_stats_init_flt_rt init;
+	u32 rule_id;
+};
+
+/*
+ * struct ipahal_stats_flt_rt - flt_rt statistics
+ * @num_packets: Total number of packets that hit this rule
+ * @num_packets_hash: Total number of packets that hit this rule in the hash table
+ */
+struct ipahal_stats_flt_rt {
+	u32 num_packets;
+	u32 num_packets_hash;
+};
+
+/*
+ * struct ipahal_stats_init_drop - Initialization parameters for drop
+ * @enabled_bitmask: bit mask of pipes to be monitored
+ */
+struct ipahal_stats_init_drop {
+	u32 enabled_bitmask;
+};
+
+/*
+ * struct ipahal_stats_get_offset_drop - Get offset parameters for Drop
+ * @init: initialization parameters used in initialization of stats
+ */
+struct ipahal_stats_get_offset_drop {
+	struct ipahal_stats_init_drop init;
+};
+
+/*
+ * struct ipahal_stats_drop - Packet Drop statistics
+ * @drop_packet_cnt: number of packets dropped
+ * @drop_byte_cnt: number of bytes dropped
+ */
+struct ipahal_stats_drop {
+	u32 drop_packet_cnt;
+	u32 drop_byte_cnt;
+};
+
+/*
+ * struct ipahal_stats_drop_all - Drop statistics for all pipes
+ * @stats: array of statistics per pipe
+ */
+struct ipahal_stats_drop_all {
+	struct ipahal_stats_drop stats[IPAHAL_MAX_PIPES];
+};
+
+/*
+ * ipahal_stats_generate_init_pyld - Generate the init payload for stats
+ * @type: type of stats
+ * @params: init_pyld parameters based on the stats type
+ * @is_atomic_ctx: is the calling context atomic?
+ *
+ * This function generates the initialization payload for a particular
+ * hardware statistic. The IPA driver is expected to use this payload to
+ * initialize the SRAM.
+ *
+ * Return: pointer to ipahal_stats_init_pyld on success or NULL on failure.
+ */
+struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
+	enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx);
+
+/*
+ * ipahal_destroy_stats_init_pyld() - Destroy/release the payload that was
+ *  built by the ipahal_stats_generate_init_pyld function.
+ */
+static inline void ipahal_destroy_stats_init_pyld(
+	struct ipahal_stats_init_pyld *pyld)
+{
+	kfree(pyld);
+}
+
+/*
+ * ipahal_stats_get_offset - Get the offset / size of payload for stats
+ * @type: type of stats
+ * @params: get_offset parameters based on the stats type
+ * @out: out parameter for the offset and size.
+ *
+ * This function returns the offset of the counter from the beginning of
+ * the table. The IPA driver is expected to read this portion of SRAM and
+ * pass it to ipahal_parse_stats() to interpret the stats.
+ *
+ * Return: 0 on success and negative on failure
+ */
+int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params,
+	struct ipahal_stats_offset *out);
+
+/*
+ * ipahal_parse_stats - parse statistics
+ * @type: type of stats
+ * @init_params: init_pyld parameters used on init
+ * @raw_stats: stats read from IPA SRAM
+ * @parsed_stats: pointer to parsed stats based on type
+ *
+ * Return: 0 on success and negative on failure
+ */
+int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
+	void *raw_stats, void *parsed_stats);
+
+
+#endif /* _IPAHAL_HW_STATS_H_ */
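
The API documented in this header is meant to be driven in a fixed order: generate the init payload, write it to the stats region in SRAM, then query offsets and parse the raw readback. A quota-stats sketch of that sequence follows; the SRAM read/write steps are left as comments because the transport is the caller's responsibility and is not specified here, and the function name is hypothetical.

/* Sketch only, using just the APIs declared above (ipahal_hw_stats.h).
 * sram_snapshot is assumed to hold offset.size bytes read back from the
 * quota stats region by the caller.
 */
static int example_quota_stats_flow(u32 pipe_bitmask, void *sram_snapshot,
	struct ipahal_stats_quota_all *parsed)
{
	struct ipahal_stats_init_quota init = {
		.enabled_bitmask = pipe_bitmask,
	};
	struct ipahal_stats_get_offset_quota get = { .init = init };
	struct ipahal_stats_init_pyld *pyld;
	struct ipahal_stats_offset offset;
	int ret;

	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA,
		&init, false);
	if (!pyld)
		return -ENOMEM;
	/* ... write pyld->data (pyld->len bytes) to the quota region ... */
	ipahal_destroy_stats_init_pyld(pyld);

	ret = ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get, &offset);
	if (ret)
		return ret;
	/* offset.offset is relative to the start of the quota region;
	 * read offset.size bytes from there into sram_snapshot, then:
	 */
	return ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA, &init,
		sram_snapshot, parsed);
}
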
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h
new file mode 100644
index 0000000..3bb761d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_HW_STATS_I_H_
+#define _IPAHAL_HW_STATS_I_H_
+
+#include "ipahal_hw_stats.h"
+
+int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type);
+
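+/* raw per-entry layouts of the statistics as read back from IPA SRAM */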
+struct ipahal_stats_quota_hw {
+	u64 num_ipv4_bytes;
+	u64 num_ipv4_pkts:32;
+	u64 num_ipv6_pkts:32;
+	u64 num_ipv6_bytes;
+};
+
+struct ipahal_stats_tethering_hdr_hw {
+	u64 dst_mask:32;
+	u64 offset:32;
+};
+
+struct ipahal_stats_tethering_hw {
+	u64 num_ipv4_bytes;
+	u64 num_ipv4_pkts:32;
+	u64 num_ipv6_pkts:32;
+	u64 num_ipv6_bytes;
+};
+
+struct ipahal_stats_flt_rt_hdr_hw {
+	u64 en_mask:32;
+	u64 reserved:16;
+	u64 cnt_offset:16;
+};
+
+struct ipahal_stats_flt_rt_hw {
+	u64 num_packets_hash:32;
+	u64 num_packets:32;
+};
+
+struct ipahal_stats_drop_hw {
+	u64 drop_byte_cnt:40;
+	u64 drop_packet_cnt:24;
+};
+
+#endif /* _IPAHAL_HW_STATS_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 1c4b287..5eb1aef 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -46,6 +46,9 @@
 			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
 	} while (0)
 
+#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
+	(kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL)))
+
 /*
  * struct ipahal_context - HAL global context data
  * @hw_type: IPA H/W type/version.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 0dccb5b..8609476 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -87,6 +87,24 @@
 	__stringify(IPA_DPS_SEQUENCER_FIRST),
 	__stringify(IPA_HPS_SEQUENCER_FIRST),
 	__stringify(IPA_CLKON_CFG),
+	__stringify(IPA_STAT_QUOTA_BASE_n),
+	__stringify(IPA_STAT_QUOTA_MASK_n),
+	__stringify(IPA_STAT_TETHERING_BASE_n),
+	__stringify(IPA_STAT_TETHERING_MASK_n),
+	__stringify(IPA_STAT_FILTER_IPV4_BASE),
+	__stringify(IPA_STAT_FILTER_IPV6_BASE),
+	__stringify(IPA_STAT_ROUTER_IPV4_BASE),
+	__stringify(IPA_STAT_ROUTER_IPV6_BASE),
+	__stringify(IPA_STAT_FILTER_IPV4_START_ID),
+	__stringify(IPA_STAT_FILTER_IPV6_START_ID),
+	__stringify(IPA_STAT_ROUTER_IPV4_START_ID),
+	__stringify(IPA_STAT_ROUTER_IPV6_START_ID),
+	__stringify(IPA_STAT_FILTER_IPV4_END_ID),
+	__stringify(IPA_STAT_FILTER_IPV6_END_ID),
+	__stringify(IPA_STAT_ROUTER_IPV4_END_ID),
+	__stringify(IPA_STAT_ROUTER_IPV6_END_ID),
+	__stringify(IPA_STAT_DROP_CNT_BASE_n),
+	__stringify(IPA_STAT_DROP_CNT_MASK_n),
 };
 
 static void ipareg_construct_dummy(enum ipahal_reg_name reg,
@@ -1510,6 +1528,60 @@
 		ipareg_construct_endp_init_conn_track_n,
 		ipareg_parse_dummy,
 		0x00000850, 0x70},
+	[IPA_HW_v4_0][IPA_STAT_QUOTA_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000700, 0x4 },
+	[IPA_HW_v4_0][IPA_STAT_QUOTA_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000708, 0x4 },
+	[IPA_HW_v4_0][IPA_STAT_TETHERING_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000710, 0x4 },
+	[IPA_HW_v4_0][IPA_STAT_TETHERING_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000718, 0x4 },
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000720, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000724, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000728, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000072C, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000730, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000734, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000738, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_START_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000073C, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV4_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000740, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_FILTER_IPV6_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000744, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV4_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000748, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_ROUTER_IPV6_END_ID] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000074C, 0x0 },
+	[IPA_HW_v4_0][IPA_STAT_DROP_CNT_BASE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000750, 0x4 },
+	[IPA_HW_v4_0][IPA_STAT_DROP_CNT_MASK_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000758, 0x4 },
 };
 
 /*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 3df49ce..a2864cd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -90,6 +90,24 @@
 	IPA_DPS_SEQUENCER_FIRST,
 	IPA_HPS_SEQUENCER_FIRST,
 	IPA_CLKON_CFG,
+	IPA_STAT_QUOTA_BASE_n,
+	IPA_STAT_QUOTA_MASK_n,
+	IPA_STAT_TETHERING_BASE_n,
+	IPA_STAT_TETHERING_MASK_n,
+	IPA_STAT_FILTER_IPV4_BASE,
+	IPA_STAT_FILTER_IPV6_BASE,
+	IPA_STAT_ROUTER_IPV4_BASE,
+	IPA_STAT_ROUTER_IPV6_BASE,
+	IPA_STAT_FILTER_IPV4_START_ID,
+	IPA_STAT_FILTER_IPV6_START_ID,
+	IPA_STAT_ROUTER_IPV4_START_ID,
+	IPA_STAT_ROUTER_IPV6_START_ID,
+	IPA_STAT_FILTER_IPV4_END_ID,
+	IPA_STAT_FILTER_IPV6_END_ID,
+	IPA_STAT_ROUTER_IPV4_END_ID,
+	IPA_STAT_ROUTER_IPV6_END_ID,
+	IPA_STAT_DROP_CNT_BASE_n,
+	IPA_STAT_DROP_CNT_MASK_n,
 	IPA_REG_MAX,
 };
 
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index c20fd2b..af46bf2 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
-ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
new file mode 100644
index 0000000..d37920e
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
@@ -0,0 +1,330 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_ut_framework.h"
+#include <linux/netdevice.h>
+
+struct ipa_test_hw_stats_ctx {
+	u32 odu_prod_hdl;
+	u32 odu_cons_hdl;
+	u32 rt4_usb;
+	u32 rt6_usb;
+	u32 rt4_odu_cons;
+	u32 rt6_odu_cons;
+	atomic_t odu_pending;
+};
+
+static struct ipa_test_hw_stats_ctx *ctx;
+
+static int ipa_test_hw_stats_suite_setup(void **ppriv)
+{
+	IPA_UT_DBG("Start Setup\n");
+
+	if (!ctx) {
+		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+		if (!ctx)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int ipa_test_hw_stats_suite_teardown(void *priv)
+{
+	IPA_UT_DBG("Start Teardown\n");
+
+	return 0;
+}
+
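+/* ODU_PROD pipe callback: frees skbs and, on write-done, drops the pending-loopback count */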
+static void odu_prod_notify(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+
+	switch (evt) {
+	case IPA_RECEIVE:
+		dev_kfree_skb_any(skb);
+		break;
+	case IPA_WRITE_DONE:
+		atomic_dec(&ctx->odu_pending);
+		dev_kfree_skb_any(skb);
+		break;
+	default:
+		IPA_UT_ERR("unexpected evt %d\n", evt);
+	}
+}
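+
+/* ODU_EMB_CONS pipe callback: loops received packets back into IPA via ODU_PROD */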
+static void odu_cons_notify(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	int ret;
+
+	switch (evt) {
+	case IPA_RECEIVE:
+		if (atomic_read(&ctx->odu_pending) >= 64)
+			msleep(20);
+		atomic_inc(&ctx->odu_pending);
+		skb_put(skb, 100);
+		ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, NULL);
+		while (ret) {
+			msleep(100);
+			ret = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, NULL);
+		}
+		break;
+	case IPA_WRITE_DONE:
+		dev_kfree_skb_any(skb);
+		break;
+	default:
+		IPA_UT_ERR("unexpected evt %d\n", evt);
+	}
+}
+
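+/* Test step: sets up the ODU_PROD and ODU_EMB_CONS system pipes used for loopback */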
+static int ipa_test_hw_stats_configure(void *priv)
+{
+	struct ipa_sys_connect_params odu_prod_params;
+	struct ipa_sys_connect_params odu_emb_cons_params;
+	int res;
+
+	/* first connect all additional pipe */
+	memset(&odu_prod_params, 0, sizeof(odu_prod_params));
+	memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
+
+	odu_prod_params.client = IPA_CLIENT_ODU_PROD;
+	odu_prod_params.desc_fifo_sz = 0x1000;
+	odu_prod_params.priv = NULL;
+	odu_prod_params.notify = odu_prod_notify;
+	res = ipa_setup_sys_pipe(&odu_prod_params,
+		&ctx->odu_prod_hdl);
+	if (res) {
+		IPA_UT_ERR("failed to setup sys pipe ODU_PROD %d\n", res);
+		return res;
+	}
+
+	odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+	odu_emb_cons_params.desc_fifo_sz = 0x1000;
+	odu_emb_cons_params.priv = NULL;
+	odu_emb_cons_params.notify = odu_cons_notify;
+	res = ipa_setup_sys_pipe(&odu_emb_cons_params,
+		&ctx->odu_cons_hdl);
+	if (res) {
+		IPA_UT_ERR("failed to setup sys pipe ODU_EMB_CONS %d\n", res);
+		ipa_teardown_sys_pipe(ctx->odu_prod_hdl);
+		return res;
+	}
+
+	IPA_UT_INFO("Configured. Please connect USB RNDIS now\n");
+
+	return 0;
+}
+
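+/*
+ * Test step: installs hashable routing tables and filter rules that steer
+ * dst-port 5002 traffic between the USB and ODU pipes
+ */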
+static int ipa_test_hw_stats_add_FnR(void *priv)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_ioc_add_flt_rule *flt_rule;
+	struct ipa_ioc_get_rt_tbl rt_lookup;
+	int ret;
+
+	rt_rule = kzalloc(sizeof(*rt_rule) + 1 * sizeof(struct ipa_rt_rule_add),
+		GFP_KERNEL);
+	if (!rt_rule) {
+		IPA_UT_DBG("no mem\n");
+		return -ENOMEM;
+	}
+
+	flt_rule = kzalloc(sizeof(*flt_rule) +
+		1 * sizeof(struct ipa_flt_rule_add), GFP_KERNEL);
+	if (!flt_rule) {
+		IPA_UT_DBG("no mem\n");
+		ret = -ENOMEM;
+		goto free_rt;
+	}
+
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	rt_lookup.ip = rt_rule->ip;
+	strlcpy(rt_rule->rt_tbl_name, "V4_RT_TO_USB_CONS",
+		IPA_RESOURCE_NAME_MAX);
+	strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+	rt_rule->num_rules = 1;
+	rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
+	rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	rt_rule->rules[0].rule.attrib.dst_port = 5002;
+	rt_rule->rules[0].rule.hashable = true;
+	if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+		IPA_UT_ERR("failed to install V4 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+	if (ipa_get_rt_tbl(&rt_lookup)) {
+		IPA_UT_ERR("failed to query V4 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+	ctx->rt4_usb = rt_lookup.hdl;
+
+	memset(rt_rule, 0, sizeof(*rt_rule));
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v6;
+	rt_lookup.ip = rt_rule->ip;
+	strlcpy(rt_rule->rt_tbl_name, "V6_RT_TO_USB_CONS",
+		IPA_RESOURCE_NAME_MAX);
+	strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+	rt_rule->num_rules = 1;
+	rt_rule->rules[0].rule.dst = IPA_CLIENT_USB_CONS;
+	rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	rt_rule->rules[0].rule.attrib.dst_port = 5002;
+	rt_rule->rules[0].rule.hashable = true;
+	if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+		IPA_UT_ERR("failed to install V6 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+	if (ipa_get_rt_tbl(&rt_lookup)) {
+		IPA_UT_ERR("failed to query V6 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+	ctx->rt6_usb = rt_lookup.hdl;
+
+	memset(rt_rule, 0, sizeof(*rt_rule));
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	rt_lookup.ip = rt_rule->ip;
+	strlcpy(rt_rule->rt_tbl_name, "V4_RT_TO_ODU_CONS",
+		IPA_RESOURCE_NAME_MAX);
+	strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+	rt_rule->num_rules = 1;
+	rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS;
+	rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	rt_rule->rules[0].rule.attrib.dst_port = 5002;
+	rt_rule->rules[0].rule.hashable = true;
+	if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+		IPA_UT_ERR("failed to install V4 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+	if (ipa_get_rt_tbl(&rt_lookup)) {
+		IPA_UT_ERR("failed to query V4 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+	ctx->rt4_odu_cons = rt_lookup.hdl;
+
+	memset(rt_rule, 0, sizeof(*rt_rule));
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v6;
+	rt_lookup.ip = rt_rule->ip;
+	strlcpy(rt_rule->rt_tbl_name, "V6_RT_TO_ODU_CONS",
+		IPA_RESOURCE_NAME_MAX);
+	strlcpy(rt_lookup.name, rt_rule->rt_tbl_name, IPA_RESOURCE_NAME_MAX);
+	rt_rule->num_rules = 1;
+	rt_rule->rules[0].rule.dst = IPA_CLIENT_ODU_EMB_CONS;
+	rt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	rt_rule->rules[0].rule.attrib.dst_port = 5002;
+	rt_rule->rules[0].rule.hashable = true;
+	if (ipa_add_rt_rule(rt_rule) || rt_rule->rules[0].status) {
+		IPA_UT_ERR("failed to install V6 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+	if (ipa_get_rt_tbl(&rt_lookup)) {
+		IPA_UT_ERR("failed to query V6 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+	ctx->rt6_odu_cons = rt_lookup.hdl;
+
+	flt_rule->commit = 1;
+	flt_rule->ip = IPA_IP_v4;
+	flt_rule->ep = IPA_CLIENT_USB_PROD;
+	flt_rule->num_rules = 1;
+	flt_rule->rules[0].at_rear = 1;
+	flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+	flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	flt_rule->rules[0].rule.attrib.dst_port = 5002;
+	flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_odu_cons;
+	flt_rule->rules[0].rule.hashable = 1;
+	if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+		IPA_UT_ERR("failed to install V4 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+
+	memset(flt_rule, 0, sizeof(*flt_rule));
+	flt_rule->commit = 1;
+	flt_rule->ip = IPA_IP_v6;
+	flt_rule->ep = IPA_CLIENT_USB_PROD;
+	flt_rule->num_rules = 1;
+	flt_rule->rules[0].at_rear = 1;
+	flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+	flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	flt_rule->rules[0].rule.attrib.dst_port = 5002;
+	flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_odu_cons;
+	flt_rule->rules[0].rule.hashable = 1;
+	if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+		IPA_UT_ERR("failed to install V6 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+
+	memset(flt_rule, 0, sizeof(*flt_rule));
+	flt_rule->commit = 1;
+	flt_rule->ip = IPA_IP_v4;
+	flt_rule->ep = IPA_CLIENT_ODU_PROD;
+	flt_rule->num_rules = 1;
+	flt_rule->rules[0].at_rear = 1;
+	flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+	flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	flt_rule->rules[0].rule.attrib.dst_port = 5002;
+	flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt4_usb;
+	flt_rule->rules[0].rule.hashable = 1;
+	if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+		IPA_UT_ERR("failed to install V4 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+
+	memset(flt_rule, 0, sizeof(*flt_rule));
+	flt_rule->commit = 1;
+	flt_rule->ip = IPA_IP_v6;
+	flt_rule->ep = IPA_CLIENT_ODU_PROD;
+	flt_rule->num_rules = 1;
+	flt_rule->rules[0].at_rear = 1;
+	flt_rule->rules[0].rule.action = IPA_PASS_TO_ROUTING;
+	flt_rule->rules[0].rule.attrib.attrib_mask = IPA_FLT_DST_PORT;
+	flt_rule->rules[0].rule.attrib.dst_port = 5002;
+	flt_rule->rules[0].rule.rt_tbl_hdl = ctx->rt6_usb;
+	flt_rule->rules[0].rule.hashable = 1;
+	if (ipa_add_flt_rule(flt_rule) || flt_rule->rules[0].status) {
+		IPA_UT_ERR("failed to install V6 rules\n");
+		ret = -EFAULT;
+		goto free_flt;
+	}
+
+	IPA_UT_INFO(
+		"Rules added. Please start data transfer on ports 5001/5002\n");
+	ret = 0;
+free_flt:
+	kfree(flt_rule);
+free_rt:
+	kfree(rt_rule);
+	return ret;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
+	ipa_test_hw_stats_suite_setup, ipa_test_hw_stats_suite_teardown)
+{
+	IPA_UT_ADD_TEST(configure, "Configure the setup",
+		ipa_test_hw_stats_configure, false, IPA_HW_v4_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(add_rules, "Add FLT and RT rules",
+		ipa_test_hw_stats_add_FnR, false, IPA_HW_v4_0, IPA_HW_MAX),
+
+} IPA_UT_DEFINE_SUITE_END(hw_stats);
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
index 4a9d3b0..823edcf 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -23,6 +23,7 @@
 IPA_UT_DECLARE_SUITE(mhi);
 IPA_UT_DECLARE_SUITE(dma);
 IPA_UT_DECLARE_SUITE(example);
+IPA_UT_DECLARE_SUITE(hw_stats);
 
 
 /**
@@ -34,6 +35,7 @@
 	IPA_UT_REGISTER_SUITE(mhi),
 	IPA_UT_REGISTER_SUITE(dma),
 	IPA_UT_REGISTER_SUITE(example),
+	IPA_UT_REGISTER_SUITE(hw_stats),
 } IPA_UT_DEFINE_ALL_SUITES_END;
 
 #endif /* _IPA_UT_SUITE_LIST_H_ */
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c
index e55260d..204142b 100644
--- a/drivers/platform/msm/seemp_core/seemp_logk.c
+++ b/drivers/platform/msm/seemp_core/seemp_logk.c
@@ -624,16 +624,17 @@
 
 			/* determine legitimacy of report */
 			if (report->report_valid &&
-				report->sequence_number <=
-					header->num_incidents &&
 				(last_sequence_number == 0
 					|| report->sequence_number >
 						last_sequence_number)) {
 				seemp_logk_rtic(report->report_type,
-					report->report.incident.actor,
-					report->report.incident.asset_id,
-					report->report.incident.asset_category,
-					report->report.incident.response);
+					report->actor,
+					/* leave this empty until
+					 * asset id is provided
+					 */
+					"",
+					report->asset_category,
+					report->response);
 				last_sequence_number = report->sequence_number;
 			} else {
 				last_pos = cur_pos - 1;
diff --git a/drivers/platform/msm/seemp_core/seemp_logk.h b/drivers/platform/msm/seemp_core/seemp_logk.h
index 871de0e..eecf4f7 100644
--- a/drivers/platform/msm/seemp_core/seemp_logk.h
+++ b/drivers/platform/msm/seemp_core/seemp_logk.h
@@ -164,39 +164,23 @@
 	__u64 report_version;     /* Version of the EL2 report */
 	__u64 mp_catalog_version;
 		/* Version of MP catalogue used for kernel protection */
+	__u64 num_incidents;      /* Number of Incidents Observed by EL2 */
 	__u8 protection_enabled;  /* Kernel Assets protected by EL2 */
 	__u8 pad1;
 	__u8 pad2;
 	__u8 pad3;
 	__u32 pad4;
-	__u64 num_incidents;      /* Number of Incidents Observed by EL2 */
-};
-
-/* individual report contents */
-union el2_report {
-	struct {
-		__u8 asset_id[0x20]; /* Asset Identifier */
-		__u64 actor;
-			/* Actor that caused the Incident.  */
-		__u8 asset_category; /* Asset Category */
-		__u8 response;       /* Response From EL2 */
-		__u16 pad1;
-		__u32 pad2;
-	} incident;
-	struct {
-		__u64 reserved;      /* TBD */
-	} info;
 };
 
 /* individual report */
 struct el2_report_data_t {
+	__u64 sequence_number; /* Sequence number of the report */
+	__u64 actor; /* Actor that caused the Incident.  */
 	__u8 report_valid;
 		/* Flag to indicate whether report instance is valid */
 	__u8 report_type;        /* Report Type */
-	__u8 pad1;
-	__u8 pad2;
-	__u64 sequence_number;   /* Sequence number of the report */
-	union el2_report report;       /* Report Contents */
+	__u8 asset_category; /* Asset Category */
+	__u8 response;       /* Response From EL2 */
 };
 
 #endif
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 62e0978..92321ad 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -3095,84 +3095,6 @@
 	return 0;
 }
 
-static ssize_t
-usb_bam_show_inactivity_timer(struct device *dev, struct device_attribute *attr,
-		    char *buf)
-{
-	char *buff = buf;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(bam_enable_strings); i++) {
-		buff += snprintf(buff, PAGE_SIZE, "%s: %dms\n",
-					bam_enable_strings[i],
-					msm_usb_bam[i].inactivity_timer_ms);
-	}
-
-	return buff - buf;
-}
-
-static ssize_t usb_bam_store_inactivity_timer(struct device *dev,
-				     struct device_attribute *attr,
-				     const char *buff, size_t count)
-{
-	char buf[USB_BAM_MAX_STR_LEN];
-	char *trimmed_buf, *bam_str, *bam_name, *timer;
-	int timer_d;
-	int bam, ret;
-
-	if (strnstr(buff, "help", USB_BAM_MAX_STR_LEN)) {
-		pr_info("Usage: <bam_name> <ms>,<bam_name> <ms>,...\n");
-		pr_info("\tbam_name: [%s, %s, %s]\n",
-			bam_enable_strings[DWC3_CTRL],
-			bam_enable_strings[CI_CTRL],
-			bam_enable_strings[HSIC_CTRL]);
-		pr_info("\tms: time in ms. Use 0 to disable timer\n");
-		return count;
-	}
-
-	strlcpy(buf, buff, sizeof(buf));
-	trimmed_buf = strim(buf);
-
-	while (trimmed_buf) {
-		bam_str = strsep(&trimmed_buf, ",");
-		if (bam_str) {
-			bam_name = strsep(&bam_str, " ");
-			bam = get_bam_type_from_core_name(bam_name);
-			if (bam < 0 || bam >= MAX_BAMS) {
-				log_event_err("%s: Invalid bam, type=%d ,name=%s\n",
-					__func__, bam, bam_name);
-				return -EINVAL;
-			}
-
-			timer = strsep(&bam_str, " ");
-
-			if (!timer)
-				continue;
-
-			ret = kstrtoint(timer, 0, &timer_d);
-			if (ret) {
-				log_event_err("%s: err:%d with value:(%d)\n",
-						__func__, ret, timer_d);
-				return ret;
-			}
-
-			/* Apply new timer setting if bam has running pipes */
-			if (msm_usb_bam[bam].inactivity_timer_ms != timer_d) {
-				msm_usb_bam[bam].inactivity_timer_ms = timer_d;
-				if (msm_usb_bam[bam].pipes_enabled_per_bam > 0
-						&& !info[bam].in_lpm)
-					usb_bam_set_inactivity_timer(bam);
-			}
-		}
-	}
-
-	return count;
-}
-
-static DEVICE_ATTR(inactivity_timer, 0600,
-		   usb_bam_show_inactivity_timer,
-		   usb_bam_store_inactivity_timer);
-
 static int usb_bam_panic_notifier(struct notifier_block *this,
 		unsigned long event, void *ptr)
 {
@@ -3221,12 +3143,6 @@
 
 	dev_dbg(&pdev->dev, "usb_bam_probe\n");
 
-	ret = device_create_file(&pdev->dev, &dev_attr_inactivity_timer);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to create fs node\n");
-		return ret;
-	}
-
 	io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!io_res) {
 		dev_err(&pdev->dev, "missing BAM memory resource\n");
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index bc19b24..6cc83ab 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -2,6 +2,6 @@
 obj-$(CONFIG_SMB135X_CHARGER)   += smb135x-charger.o pmic-voter.o
 obj-$(CONFIG_SMB1355_SLAVE_CHARGER)   += smb1355-charger.o pmic-voter.o
 obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o battery.o
-obj-$(CONFIG_QPNP_SMB2)		+= qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_QPNP_SMB2)		+= step-chg-jeita.o battery.o qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
 obj-$(CONFIG_SMB138X_CHARGER)	+= smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
 obj-$(CONFIG_QPNP_QNOVO)	+= qpnp-qnovo.o battery.o
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index cdd09dd..c77b808 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -67,6 +67,7 @@
 #define MAX_LINE_LENGTH			(ADDR_LEN + (ITEMS_PER_LINE *	\
 					CHARS_PER_ITEM) + 1)		\
 
+#define NUM_PARTITIONS			3
 #define FG_SRAM_ADDRESS_MAX		255
 #define FG_SRAM_LEN			504
 #define PROFILE_LEN			224
@@ -192,6 +193,18 @@
 		int val);
 };
 
+struct fg_dma_address {
+	/* Starting word address of the partition */
+	u16 partition_start;
+	/* Last word address of the partition */
+	u16 partition_end;
+	/*
+	 * Byte offset in the FG_DMA peripheral that maps to the partition_start
+	 * in SRAM
+	 */
+	u16 spmi_addr_base;
+};
+
 enum fg_alg_flag_id {
 	ALG_FLAG_SOC_LT_OTG_MIN = 0,
 	ALG_FLAG_SOC_LT_RECHARGE,
@@ -360,12 +373,12 @@
 	struct power_supply	*parallel_psy;
 	struct iio_channel	*batt_id_chan;
 	struct iio_channel	*die_temp_chan;
-	struct fg_memif		*sram;
 	struct fg_irq_info	*irqs;
 	struct votable		*awake_votable;
 	struct votable		*delta_bsoc_irq_en_votable;
 	struct votable		*batt_miss_irq_en_votable;
 	struct fg_sram_param	*sp;
+	struct fg_dma_address	*addr_map;
 	struct fg_alg_flag	*alg_flags;
 	int			*debug_mask;
 	char			batt_profile[PROFILE_LEN];
@@ -409,8 +422,10 @@
 	bool			esr_flt_cold_temp_en;
 	bool			slope_limit_en;
 	bool			use_ima_single_mode;
+	bool			use_dma;
 	struct completion	soc_update;
 	struct completion	soc_ready;
+	struct completion	mem_grant;
 	struct delayed_work	profile_load_work;
 	struct work_struct	status_change_work;
 	struct work_struct	cycle_count_work;
@@ -459,10 +474,15 @@
 			u8 offset, u8 *val, int len);
 extern int fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
 			u8 offset, u8 *val, int len, bool atomic_access);
+extern int fg_direct_mem_read(struct fg_chip *chip, u16 address,
+			u8 offset, u8 *val, int len);
+extern int fg_direct_mem_write(struct fg_chip *chip, u16 address,
+			u8 offset, u8 *val, int len, bool atomic_access);
 extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
 extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
 extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
 extern int fg_ima_init(struct fg_chip *chip);
+extern int fg_dma_init(struct fg_chip *chip);
 extern int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts);
 extern int fg_clear_dma_errors_if_any(struct fg_chip *chip);
 extern int fg_debugfs_create(struct fg_chip *chip);
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index 8a949bf..0abc9df 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -746,6 +746,257 @@
 	return rc;
 }
 
+#define MEM_GRANT_WAIT_MS	200
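+/*
+ * Request or release SRAM access through the memory arbiter. On a request,
+ * wait for the DMA grant interrupt to complete mem_grant; on timeout, back
+ * out the arbiter configuration and return -ETIMEDOUT.
+ */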
+static int fg_direct_mem_request(struct fg_chip *chip, bool request)
+{
+	int rc, ret;
+	u8 val, mask;
+	bool tried_again = false;
+
+	if (request)
+		reinit_completion(&chip->mem_grant);
+
+	mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
+	val = request ? MEM_ACCESS_REQ_BIT : 0;
+	rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask, val);
+	if (rc < 0) {
+		pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n", rc);
+		return rc;
+	}
+
+	mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
+	val = request ? mask : 0;
+	rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask, val);
+	if (rc < 0) {
+		pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n", rc);
+		return rc;
+	}
+
+	if (request)
+		pr_debug("requesting access\n");
+	else
+		pr_debug("releasing access\n");
+
+	if (!request)
+		return 0;
+
+wait:
+	ret = wait_for_completion_interruptible_timeout(
+		&chip->mem_grant, msecs_to_jiffies(MEM_GRANT_WAIT_MS));
+	/* If we were interrupted, wait one more time. */
+	if (ret <= 0) {
+		if ((ret == -ERESTARTSYS || ret == 0) && !tried_again) {
+			pr_debug("trying again, ret=%d\n", ret);
+			tried_again = true;
+			goto wait;
+		} else {
+			pr_err("wait for mem_grant timed out ret=%d\n",
+				ret);
+		}
+	}
+
+	if (ret <= 0) {
+		val = 0;
+		mask = MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT;
+		rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip), mask,
+					val);
+		if (rc < 0) {
+			pr_err("failed to configure mem_if_mem_intf_cfg rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		mask = MEM_ARB_LO_LATENCY_EN_BIT | MEM_ARB_REQ_BIT;
+		rc = fg_masked_write(chip, MEM_IF_MEM_ARB_CFG(chip), mask,
+					val);
+		if (rc < 0) {
+			pr_err("failed to configure mem_if_mem_arb_cfg rc:%d\n",
+				rc);
+			return rc;
+		}
+
+		return -ETIMEDOUT;
+	}
+
+	return rc;
+}
+
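+/* Translate an SRAM word address into the SPMI address of its DMA partition */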
+static int fg_get_dma_address(struct fg_chip *chip, u16 sram_addr, u8 offset,
+				u16 *addr)
+{
+	int i;
+	u16 start_sram_addr, end_sram_addr;
+
+	for (i = 0; i < NUM_PARTITIONS; i++) {
+		start_sram_addr = chip->addr_map[i].partition_start;
+		end_sram_addr = chip->addr_map[i].partition_end;
+		if (sram_addr >= start_sram_addr &&
+			sram_addr <= end_sram_addr) {
+			*addr = chip->addr_map[i].spmi_addr_base + offset +
+					(sram_addr - start_sram_addr) *
+						BYTES_PER_SRAM_WORD;
+			return 0;
+		}
+	}
+
+	pr_err("Couldn't find address for %d from address map\n", sram_addr);
+	return -ENXIO;
+}
+
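+/* Count the DMA partitions spanned by a transfer of 'len' bytes at sram_addr */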
+static int fg_get_partition_count(struct fg_chip *chip, u16 sram_addr, int len,
+				int *count)
+{
+	int i, num = 0;
+	u16 end_addr, last_addr = 0;
+
+	end_addr = sram_addr + len / BYTES_PER_SRAM_WORD;
+	if (!(len % BYTES_PER_SRAM_WORD))
+		end_addr -= 1;
+
+	if (sram_addr == end_addr) {
+		*count = 1;
+		return 0;
+	}
+
+	for (i = 0; i < NUM_PARTITIONS; i++) {
+		pr_debug("address: %d last_addr: %d\n", sram_addr, last_addr);
+		if (sram_addr >= chip->addr_map[i].partition_start
+			&& sram_addr <= chip->addr_map[i].partition_end
+			&& last_addr < end_addr) {
+			num++;
+			last_addr = chip->addr_map[i].partition_end;
+			sram_addr = chip->addr_map[i+1].partition_start;
+		}
+	}
+
+	if (num > 0) {
+		*count = num;
+		return 0;
+	}
+
+	pr_err("Couldn't find number of partitions for address %d\n",
+		sram_addr);
+	return -ENXIO;
+}
+
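+/* Number of bytes available from sram_addr to the end of its DMA partition */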
+static int fg_get_partition_avail_bytes(struct fg_chip *chip, u16 sram_addr,
+					int len, int *rem_len)
+{
+	int i, part_len = 0, temp;
+	u16 end_addr;
+
+	for (i = 0; i < NUM_PARTITIONS; i++) {
+		if (sram_addr >= chip->addr_map[i].partition_start
+			&& sram_addr <= chip->addr_map[i].partition_end) {
+			part_len = (chip->addr_map[i].partition_end -
+					chip->addr_map[i].partition_start + 1);
+			part_len *= BYTES_PER_SRAM_WORD;
+			end_addr = chip->addr_map[i].partition_end;
+			break;
+		}
+	}
+
+	if (part_len <= 0) {
+		pr_err("No DMA partition found for address %d\n", sram_addr);
+		return -ENXIO;
+	}
+
+	temp = (end_addr - sram_addr + 1) * BYTES_PER_SRAM_WORD;
+	if (temp > part_len || !temp) {
+		pr_err("Bad length=%d\n", temp);
+		return -ENXIO;
+	}
+
+	*rem_len = temp;
+	pr_debug("address %d len %d rem_len %d\n", sram_addr, len, *rem_len);
+	return 0;
+}
+
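+/*
+ * Core DMA access: take arbiter access, then read or write the request one
+ * partition at a time before releasing access
+ */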
+static int __fg_direct_mem_rw(struct fg_chip *chip, u16 sram_addr, u8 offset,
+				u8 *val, int len, bool access)
+{
+	int rc, ret, num_partitions, num_bytes = 0;
+	u16 addr;
+	u8 *ptr = val;
+	char *temp_str;
+
+	if (offset > 3) {
+		pr_err("offset too large %d\n", offset);
+		return -EINVAL;
+	}
+
+	rc = fg_get_partition_count(chip, sram_addr, len, &num_partitions);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("number of partitions: %d\n", num_partitions);
+
+	rc = fg_direct_mem_request(chip, true);
+	if (rc < 0) {
+		pr_err("Error in requesting direct_mem access rc=%d\n", rc);
+		return rc;
+	}
+
+	while (num_partitions-- && len) {
+		rc = fg_get_dma_address(chip, sram_addr, offset, &addr);
+		if (rc < 0) {
+			pr_err("Incorrect address %d/offset %d\n", sram_addr,
+				offset);
+			break;
+		}
+
+		rc = fg_get_partition_avail_bytes(chip, sram_addr + offset, len,
+						&num_bytes);
+		if (rc < 0)
+			break;
+
+		if (num_bytes > len)
+			num_bytes = len;
+
+		pr_debug("accessing address: [%d %d] dma_address = %x\n",
+			sram_addr, offset, addr);
+
+		if (access == FG_READ) {
+			rc = fg_read(chip, addr, ptr, num_bytes);
+			temp_str = "read";
+		} else {
+			rc = fg_write(chip, addr, ptr, num_bytes);
+			temp_str = "write";
+		}
+
+		if (rc < 0) {
+			pr_err("Error in %sing address %d rc=%d\n", temp_str,
+				sram_addr, rc);
+			break;
+		}
+
+		ptr += num_bytes;
+		len -= num_bytes;
+		sram_addr += (num_bytes / BYTES_PER_SRAM_WORD);
+		offset = 0;
+	}
+
+	ret = fg_direct_mem_request(chip, false);
+	if (ret < 0) {
+		pr_err("Error in releasing direct_mem access rc=%d\n", ret);
+		return ret;
+	}
+
+	return rc;
+}
+
+int fg_direct_mem_read(struct fg_chip *chip, u16 sram_addr, u8 offset,
+				u8 *val, int len)
+{
+	return __fg_direct_mem_rw(chip, sram_addr, offset, val, len, FG_READ);
+}
+
+int fg_direct_mem_write(struct fg_chip *chip, u16 sram_addr, u8 offset,
+				u8 *val, int len, bool atomic_access)
+{
+	return __fg_direct_mem_rw(chip, sram_addr, offset, val, len, FG_WRITE);
+}
+
 int fg_ima_init(struct fg_chip *chip)
 {
 	int rc;
@@ -778,3 +1029,59 @@
 
 	return 0;
 }
+
+/*
+ * This SRAM partition to DMA address partition mapping remains identical for
+ * PMICs that use GEN3 FG.
+ */
+static struct fg_dma_address fg_gen3_addr_map[NUM_PARTITIONS] = {
+	/* system partition */
+	{
+		.partition_start = 0,
+		.partition_end = 23,
+		.spmi_addr_base = FG_DMA0_BASE + SRAM_ADDR_OFFSET,
+	},
+	/* battery profile partition */
+	{
+		.partition_start = 24,
+		.partition_end = 79,
+		.spmi_addr_base = FG_DMA1_BASE + SRAM_ADDR_OFFSET,
+	},
+	/* scratch pad partition */
+	{
+		.partition_start = 80,
+		.partition_end = 125,
+		.spmi_addr_base = FG_DMA2_BASE + SRAM_ADDR_OFFSET,
+	},
+};
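+
+/*
+ * One-time DMA setup: clear stale DMA errors, select partition-based
+ * addressing and release the memory arbiter so later requests can be granted
+ */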
+int fg_dma_init(struct fg_chip *chip)
+{
+	int rc;
+
+	chip->addr_map = fg_gen3_addr_map;
+
+	/* Clear DMA errors if any before clearing IMA errors */
+	rc = fg_clear_dma_errors_if_any(chip);
+	if (rc < 0) {
+		pr_err("Error in checking DMA errors rc:%d\n", rc);
+		return rc;
+	}
+
+	/* Configure the DMA peripheral addressing to partition */
+	rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip), ADDR_KIND_BIT,
+				ADDR_KIND_BIT);
+	if (rc < 0) {
+		pr_err("failed to configure DMA_CTL rc:%d\n", rc);
+		return rc;
+	}
+
+	/* Release the DMA initially so that request can happen */
+	rc = fg_direct_mem_request(chip, false);
+	if (rc < 0) {
+		pr_err("Error in releasing direct_mem access rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return 0;
+}
diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h
index cd0b2fb..8ca4376 100644
--- a/drivers/power/supply/qcom/fg-reg.h
+++ b/drivers/power/supply/qcom/fg-reg.h
@@ -31,6 +31,7 @@
 #define BATT_SOC_LOW_PWR_STS(chip)		(chip->batt_soc_base + 0x56)
 
 /* BATT_SOC_INT_RT_STS */
+#define SOC_READY_BIT				BIT(1)
 #define MSOC_EMPTY_BIT				BIT(5)
 
 /* BATT_SOC_EN_CTL */
@@ -266,6 +267,7 @@
 
 /* FG_MEM_IF register and bit definitions */
 #define MEM_IF_INT_RT_STS(chip)			((chip->mem_if_base) + 0x10)
+#define MEM_IF_MEM_ARB_CFG(chip)		((chip->mem_if_base) + 0x40)
 #define MEM_IF_MEM_INTF_CFG(chip)		((chip->mem_if_base) + 0x50)
 #define MEM_IF_IMA_CTL(chip)			((chip->mem_if_base) + 0x51)
 #define MEM_IF_IMA_CFG(chip)			((chip->mem_if_base) + 0x52)
@@ -286,6 +288,11 @@
 
 /* MEM_IF_INT_RT_STS */
 #define MEM_XCP_BIT				BIT(1)
+#define MEM_GNT_BIT				BIT(2)
+
+/* MEM_IF_MEM_ARB_CFG */
+#define MEM_ARB_LO_LATENCY_EN_BIT		BIT(1)
+#define MEM_ARB_REQ_BIT				BIT(0)
 
 /* MEM_IF_MEM_INTF_CFG */
 #define MEM_ACCESS_REQ_BIT			BIT(7)
@@ -325,5 +332,13 @@
 #define DMA_READ_ERROR_BIT			BIT(2)
 
 /* MEM_IF_DMA_CTL */
+#define ADDR_KIND_BIT				BIT(1)
 #define DMA_CLEAR_LOG_BIT			BIT(0)
+
+/* FG_DMAx */
+#define FG_DMA0_BASE				0x4800
+#define FG_DMA1_BASE				0x4900
+#define FG_DMA2_BASE				0x4A00
+#define FG_DMA3_BASE				0x4B00
+#define SRAM_ADDR_OFFSET			0x20
 #endif
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 38d9594..d9ca47c 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -255,8 +255,6 @@
 		reinit_completion(&chip->soc_update);
 		enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
 		atomic_access = true;
-	} else {
-		flags = FG_IMA_DEFAULT;
 	}
 wait:
 	/*
@@ -282,11 +280,17 @@
 		}
 	}
 
-	rc = fg_interleaved_mem_write(chip, address, offset, val, len,
-			atomic_access);
+	if (chip->use_dma)
+		rc = fg_direct_mem_write(chip, address, offset, val, len,
+				false);
+	else
+		rc = fg_interleaved_mem_write(chip, address, offset, val, len,
+				atomic_access);
+
 	if (rc < 0)
 		pr_err("Error in writing SRAM address 0x%x[%d], rc=%d\n",
 			address, offset, rc);
+
 out:
 	if (atomic_access)
 		disable_irq_nosync(chip->irqs[SOC_UPDATE_IRQ].irq);
@@ -313,9 +317,14 @@
 
 	if (!(flags & FG_IMA_NO_WLOCK))
 		vote(chip->awake_votable, SRAM_READ, true, 0);
+
 	mutex_lock(&chip->sram_rw_lock);
 
-	rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+	if (chip->use_dma)
+		rc = fg_direct_mem_read(chip, address, offset, val, len);
+	else
+		rc = fg_interleaved_mem_read(chip, address, offset, val, len);
+
 	if (rc < 0)
 		pr_err("Error in reading SRAM address 0x%x[%d], rc=%d\n",
 			address, offset, rc);
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index e5a3a07..d522926 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -533,7 +533,7 @@
 	rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
 		buf, chip->sp[id].len, FG_IMA_DEFAULT);
 	if (rc < 0) {
-		pr_err("Error reading address 0x%04x[%d] rc=%d\n",
+		pr_err("Error reading address %d[%d] rc=%d\n",
 			chip->sp[id].addr_word, chip->sp[id].addr_byte, rc);
 		return rc;
 	}
@@ -1611,7 +1611,7 @@
 static int fg_charge_full_update(struct fg_chip *chip)
 {
 	union power_supply_propval prop = {0, };
-	int rc, msoc, bsoc, recharge_soc;
+	int rc, msoc, bsoc, recharge_soc, msoc_raw;
 	u8 full_soc[2] = {0xFF, 0xFF};
 
 	if (!chip->dt.hold_soc_while_full)
@@ -1647,6 +1647,7 @@
 		pr_err("Error in getting msoc, rc=%d\n", rc);
 		goto out;
 	}
+	msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
 
 	fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
 		msoc, bsoc, chip->health, chip->charge_status,
@@ -1670,7 +1671,7 @@
 			fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
 				msoc);
 		}
-	} else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+	} else if (msoc_raw < recharge_soc && chip->charge_full) {
 		chip->delta_soc = FULL_CAPACITY - msoc;
 
 		/*
@@ -1700,8 +1701,8 @@
 				rc);
 			goto out;
 		}
-		fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
-			bsoc >> 8, recharge_soc, chip->delta_soc);
+		fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n",
+			msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc);
 	} else {
 		goto out;
 	}
@@ -3503,6 +3504,9 @@
 
 static int fg_memif_init(struct fg_chip *chip)
 {
+	if (chip->use_dma)
+		return fg_dma_init(chip);
+
 	return fg_ima_init(chip);
 }
 
@@ -3542,6 +3546,26 @@
 
 /* INTERRUPT HANDLERS STAY HERE */
 
+static irqreturn_t fg_dma_grant_irq_handler(int irq, void *data)
+{
+	struct fg_chip *chip = data;
+	u8 status;
+	int rc;
+
+	rc = fg_read(chip, MEM_IF_INT_RT_STS(chip), &status, 1);
+	if (rc < 0) {
+		pr_err("failed to read addr=0x%04x, rc=%d\n",
+			MEM_IF_INT_RT_STS(chip), rc);
+		return IRQ_HANDLED;
+	}
+
+	fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
+	if (status & MEM_GNT_BIT)
+		complete_all(&chip->mem_grant);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data)
 {
 	struct fg_chip *chip = data;
@@ -3822,7 +3846,8 @@
 	/* MEM_IF irqs */
 	[DMA_GRANT_IRQ] = {
 		.name		= "dma-grant",
-		.handler	= fg_dummy_irq_handler,
+		.handler	= fg_dma_grant_irq_handler,
+		.wakeable	= true,
 	},
 	[MEM_XCP_IRQ] = {
 		.name		= "mem-xcp",
@@ -4046,6 +4071,7 @@
 
 	switch (chip->pmic_rev_id->pmic_subtype) {
 	case PMI8998_SUBTYPE:
+		chip->use_dma = true;
 		if (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4) {
 			chip->sp = pmi8998_v1_sram_params;
 			chip->alg_flags = pmi8998_v1_alg_flags;
@@ -4466,6 +4492,7 @@
 	mutex_init(&chip->charge_full_lock);
 	init_completion(&chip->soc_update);
 	init_completion(&chip->soc_ready);
+	init_completion(&chip->mem_grant);
 	INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
 	INIT_WORK(&chip->status_change_work, status_change_work);
 	INIT_WORK(&chip->cycle_count_work, cycle_count_work);
@@ -4479,6 +4506,25 @@
 		goto exit;
 	}
 
+	platform_set_drvdata(pdev, chip);
+
+	rc = fg_register_interrupts(chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
+			rc);
+		goto exit;
+	}
+
+	/* Keep SOC_UPDATE irq disabled until we require it */
+	if (fg_irqs[SOC_UPDATE_IRQ].irq)
+		disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+
+	/* Keep BSOC_DELTA_IRQ disabled until we require it */
+	vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
+
+	/* Keep BATT_MISSING_IRQ disabled until we require it */
+	vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
+
 	rc = fg_hw_init(chip);
 	if (rc < 0) {
 		dev_err(chip->dev, "Error in initializing FG hardware, rc:%d\n",
@@ -4486,8 +4532,6 @@
 		goto exit;
 	}
 
-	platform_set_drvdata(pdev, chip);
-
 	/* Register the power supply */
 	fg_psy_cfg.drv_data = chip;
 	fg_psy_cfg.of_node = NULL;
@@ -4508,23 +4552,6 @@
 		goto exit;
 	}
 
-	rc = fg_register_interrupts(chip);
-	if (rc < 0) {
-		dev_err(chip->dev, "Error in registering interrupts, rc:%d\n",
-			rc);
-		goto exit;
-	}
-
-	/* Keep SOC_UPDATE_IRQ disabled until we require it */
-	if (fg_irqs[SOC_UPDATE_IRQ].irq)
-		disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
-
-	/* Keep BSOC_DELTA_IRQ disabled until we require it */
-	vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0);
-
-	/* Keep BATT_MISSING_IRQ disabled until we require it */
-	vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0);
-
 	rc = fg_debugfs_create(chip);
 	if (rc < 0) {
 		dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index e94873c..5605c8a 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -19,6 +19,7 @@
 #include <linux/power_supply.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/log2.h>
 #include <linux/qpnp/qpnp-revid.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/of_regulator.h>
@@ -122,87 +123,6 @@
 		.max_u	= 1575000,
 		.step_u	= 25000,
 	},
-	.step_soc_threshold[0]		= {
-		.name	= "step charge soc threshold 1",
-		.reg	= STEP_CHG_SOC_OR_BATT_V_TH1_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-	},
-	.step_soc_threshold[1]		= {
-		.name	= "step charge soc threshold 2",
-		.reg	= STEP_CHG_SOC_OR_BATT_V_TH2_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-	},
-	.step_soc_threshold[2]         = {
-		.name	= "step charge soc threshold 3",
-		.reg	= STEP_CHG_SOC_OR_BATT_V_TH3_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-	},
-	.step_soc_threshold[3]         = {
-		.name	= "step charge soc threshold 4",
-		.reg	= STEP_CHG_SOC_OR_BATT_V_TH4_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-	},
-	.step_soc			= {
-		.name	= "step charge soc",
-		.reg	= STEP_CHG_SOC_VBATT_V_REG,
-		.min_u	= 0,
-		.max_u	= 100,
-		.step_u	= 1,
-		.set_proc	= smblib_mapping_soc_from_field_value,
-	},
-	.step_cc_delta[0]	= {
-		.name	= "step charge current delta 1",
-		.reg	= STEP_CHG_CURRENT_DELTA1_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
-	.step_cc_delta[1]	= {
-		.name	= "step charge current delta 2",
-		.reg	= STEP_CHG_CURRENT_DELTA2_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
-	.step_cc_delta[2]	= {
-		.name	= "step charge current delta 3",
-		.reg	= STEP_CHG_CURRENT_DELTA3_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
-	.step_cc_delta[3]	= {
-		.name	= "step charge current delta 4",
-		.reg	= STEP_CHG_CURRENT_DELTA4_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
-	.step_cc_delta[4]	= {
-		.name	= "step charge current delta 5",
-		.reg	= STEP_CHG_CURRENT_DELTA5_REG,
-		.min_u	= 100000,
-		.max_u	= 3200000,
-		.step_u	= 100000,
-		.get_proc	= smblib_mapping_cc_delta_to_field_value,
-		.set_proc	= smblib_mapping_cc_delta_from_field_value,
-	},
 	.freq_buck		= {
 		.name	= "buck switching frequency",
 		.reg	= CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
@@ -236,7 +156,6 @@
 	},
 };
 
-#define STEP_CHARGING_MAX_STEPS	5
 struct smb_dt_props {
 	int	usb_icl_ua;
 	int	dc_icl_ua;
@@ -244,14 +163,13 @@
 	int	wipower_max_uw;
 	int	min_freq_khz;
 	int	max_freq_khz;
-	u32	step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
-	s32	step_cc_delta[STEP_CHARGING_MAX_STEPS];
 	struct	device_node *revid_dev_node;
 	int	float_option;
 	int	chg_inhibit_thr_mv;
 	bool	no_battery;
 	bool	hvdcp_disable;
 	bool	auto_recharge_soc;
+	int	wd_bark_time;
 };
 
 struct smb2 {
@@ -273,6 +191,11 @@
 #define MICRO_1P5A		1500000
 #define MICRO_P1A		100000
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
+#define MIN_WD_BARK_TIME		16
+#define DEFAULT_WD_BARK_TIME		64
+#define BITE_WDOG_TIMEOUT_8S		0x3
+#define BARK_WDOG_TIMEOUT_MASK		GENMASK(3, 2)
+#define BARK_WDOG_TIMEOUT_SHIFT		2
 static int smb2_parse_dt(struct smb2 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -284,27 +207,13 @@
 		return -EINVAL;
 	}
 
-	chg->step_chg_enabled = true;
+	chg->step_chg_enabled = of_property_read_bool(node,
+				"qcom,step-charging-enable");
 
-	if (of_property_count_u32_elems(node, "qcom,step-soc-thresholds")
-			!= STEP_CHARGING_MAX_STEPS - 1)
-		chg->step_chg_enabled = false;
-
-	rc = of_property_read_u32_array(node, "qcom,step-soc-thresholds",
-			chip->dt.step_soc_threshold,
-			STEP_CHARGING_MAX_STEPS - 1);
-	if (rc < 0)
-		chg->step_chg_enabled = false;
-
-	if (of_property_count_u32_elems(node, "qcom,step-current-deltas")
-			!= STEP_CHARGING_MAX_STEPS)
-		chg->step_chg_enabled = false;
-
-	rc = of_property_read_u32_array(node, "qcom,step-current-deltas",
-			chip->dt.step_cc_delta,
-			STEP_CHARGING_MAX_STEPS);
-	if (rc < 0)
-		chg->step_chg_enabled = false;
+	rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
+					&chip->dt.wd_bark_time);
+	if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
+		chip->dt.wd_bark_time = DEFAULT_WD_BARK_TIME;
 
 	chip->dt.no_battery = of_property_read_bool(node,
 						"qcom,batteryless-platform");
@@ -988,7 +897,6 @@
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TECHNOLOGY,
 	POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
-	POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
 	POWER_SUPPLY_PROP_CHARGE_DONE,
 	POWER_SUPPLY_PROP_PARALLEL_DISABLE,
 	POWER_SUPPLY_PROP_SET_SHIP_MODE,
@@ -1051,9 +959,6 @@
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 		val->intval = chg->step_chg_enabled;
 		break;
-	case POWER_SUPPLY_PROP_STEP_CHARGING_STEP:
-		rc = smblib_get_prop_step_chg_step(chg, val);
-		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
 		rc = smblib_get_prop_batt_voltage_now(chg, val);
 		break;
@@ -1167,6 +1072,9 @@
 			vote(chg->fcc_votable, BATT_PROFILE_VOTER, false, 0);
 		}
 		break;
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
+		chg->step_chg_enabled = !!val->intval;
+		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		chg->batt_profile_fcc_ua = val->intval;
 		vote(chg->fcc_votable, BATT_PROFILE_VOTER, true, val->intval);
@@ -1207,6 +1115,7 @@
 	case POWER_SUPPLY_PROP_DP_DM:
 	case POWER_SUPPLY_PROP_RERUN_AICL:
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 		return 1;
 	default:
 		break;
@@ -1334,73 +1243,6 @@
 /***************************
  * HARDWARE INITIALIZATION *
  ***************************/
-static int smb2_config_step_charging(struct smb2 *chip)
-{
-	struct smb_charger *chg = &chip->chg;
-	int rc = 0;
-	int i;
-
-	if (!chg->step_chg_enabled)
-		return rc;
-
-	for (i = 0; i < STEP_CHARGING_MAX_STEPS - 1; i++) {
-		rc = smblib_set_charge_param(chg,
-					     &chg->param.step_soc_threshold[i],
-					     chip->dt.step_soc_threshold[i]);
-		if (rc < 0) {
-			pr_err("Couldn't configure soc thresholds rc = %d\n",
-				rc);
-			goto err_out;
-		}
-	}
-
-	for (i = 0; i < STEP_CHARGING_MAX_STEPS; i++) {
-		rc = smblib_set_charge_param(chg, &chg->param.step_cc_delta[i],
-					     chip->dt.step_cc_delta[i]);
-		if (rc < 0) {
-			pr_err("Couldn't configure cc delta rc = %d\n",
-				rc);
-			goto err_out;
-		}
-	}
-
-	rc = smblib_write(chg, STEP_CHG_UPDATE_REQUEST_TIMEOUT_CFG_REG,
-			  STEP_CHG_UPDATE_REQUEST_TIMEOUT_40S);
-	if (rc < 0) {
-		dev_err(chg->dev,
-			"Couldn't configure soc request timeout reg rc=%d\n",
-			 rc);
-		goto err_out;
-	}
-
-	rc = smblib_write(chg, STEP_CHG_UPDATE_FAIL_TIMEOUT_CFG_REG,
-			  STEP_CHG_UPDATE_FAIL_TIMEOUT_120S);
-	if (rc < 0) {
-		dev_err(chg->dev,
-			"Couldn't configure soc fail timeout reg rc=%d\n",
-			rc);
-		goto err_out;
-	}
-
-	/*
-	 *  enable step charging, source soc, standard mode, go to final
-	 *  state in case of failure.
-	 */
-	rc = smblib_write(chg, CHGR_STEP_CHG_MODE_CFG_REG,
-			       STEP_CHARGING_ENABLE_BIT |
-			       STEP_CHARGING_SOURCE_SELECT_BIT |
-			       STEP_CHARGING_SOC_FAIL_OPTION_BIT);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
-		goto err_out;
-	}
-
-	return 0;
-err_out:
-	chg->step_chg_enabled = false;
-	return rc;
-}
-
 static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
 {
 	int rc;
@@ -1576,7 +1418,7 @@
 {
 	struct smb_charger *chg = &chip->chg;
 	int rc;
-	u8 stat;
+	u8 stat, val;
 
 	if (chip->dt.no_battery)
 		chg->fake_capacity = 50;
@@ -1724,11 +1566,27 @@
 		return rc;
 	}
 
-	/* configure step charging */
-	rc = smb2_config_step_charging(chip);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't configure step charging rc=%d\n",
-			rc);
+	val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT) &
+						BARK_WDOG_TIMEOUT_MASK;
+	val |= BITE_WDOG_TIMEOUT_8S;
+	rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
+			BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
+			BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
+			val);
+	if (rc) {
+		pr_err("Couldn't configure WD config rc=%d\n", rc);
+		return rc;
+	}
+
+	/* enable WD BARK and enable it on plugin */
+	rc = smblib_masked_write(chg, WD_CFG_REG,
+			WATCHDOG_TRIGGER_AFP_EN_BIT |
+			WDOG_TIMER_EN_ON_PLUGIN_BIT |
+			BARK_WDOG_INT_EN_BIT,
+			WDOG_TIMER_EN_ON_PLUGIN_BIT |
+			BARK_WDOG_INT_EN_BIT);
+	if (rc) {
+		pr_err("Couldn't enable WD bark rc=%d\n", rc);
 		return rc;
 	}
 
@@ -1787,6 +1645,13 @@
 		return rc;
 	}
 
+	rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	switch (chip->dt.chg_inhibit_thr_mv) {
 	case 50:
 		rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
@@ -1851,6 +1716,12 @@
 	struct smb_charger *chg = &chip->chg;
 	int rc;
 
+	/*
+	 * If the USB path is suspended, we may have missed disabling the ICL
+	 * change interrupt because it might not have been requested yet
+	 */
+	rerun_election(chg->usb_icl_votable);
+
 	/* configure power role for dual-role */
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 				 TYPEC_POWER_ROLE_CMD_MASK, 0);
@@ -1942,8 +1813,8 @@
 	smblib_handle_usb_source_change(0, &irq_data);
 	smblib_handle_chg_state_change(0, &irq_data);
 	smblib_handle_icl_change(0, &irq_data);
-	smblib_handle_step_chg_state_change(0, &irq_data);
-	smblib_handle_step_chg_soc_update_request(0, &irq_data);
+	smblib_handle_batt_temp_changed(0, &irq_data);
+	smblib_handle_wdog_bark(0, &irq_data);
 
 	return 0;
 }
@@ -1965,18 +1836,15 @@
 	},
 	[STEP_CHG_STATE_CHANGE_IRQ] = {
 		.name		= "step-chg-state-change",
-		.handler	= smblib_handle_step_chg_state_change,
-		.wake		= true,
+		.handler	= NULL,
 	},
 	[STEP_CHG_SOC_UPDATE_FAIL_IRQ] = {
 		.name		= "step-chg-soc-update-fail",
-		.handler	= smblib_handle_step_chg_soc_update_fail,
-		.wake		= true,
+		.handler	= NULL,
 	},
 	[STEP_CHG_SOC_UPDATE_REQ_IRQ] = {
 		.name		= "step-chg-soc-update-request",
-		.handler	= smblib_handle_step_chg_soc_update_request,
-		.wake		= true,
+		.handler	= NULL,
 	},
 /* OTG IRQs */
 	[OTG_FAIL_IRQ] = {
@@ -1999,6 +1867,7 @@
 	[BATT_TEMP_IRQ] = {
 		.name		= "bat-temp",
 		.handler	= smblib_handle_batt_temp_changed,
+		.wake		= true,
 	},
 	[BATT_OCP_IRQ] = {
 		.name		= "bat-ocp",
@@ -2094,7 +1963,8 @@
 	},
 	[WDOG_BARK_IRQ] = {
 		.name		= "wdog-bark",
-		.handler	= NULL,
+		.handler	= smblib_handle_wdog_bark,
+		.wake		= true,
 	},
 	[AICL_FAIL_IRQ] = {
 		.name		= "aicl-fail",
@@ -2200,6 +2070,8 @@
 				return rc;
 		}
 	}
+	if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq)
+		chg->usb_icl_change_irq_enabled = true;
 
 	return rc;
 }
@@ -2332,18 +2204,18 @@
 		return rc;
 	}
 
-	rc = smblib_init(chg);
-	if (rc < 0) {
-		pr_err("Smblib_init failed rc=%d\n", rc);
-		goto cleanup;
-	}
-
 	rc = smb2_parse_dt(chip);
 	if (rc < 0) {
 		pr_err("Couldn't parse device tree rc=%d\n", rc);
 		goto cleanup;
 	}
 
+	rc = smblib_init(chg);
+	if (rc < 0) {
+		pr_err("Smblib_init failed rc=%d\n", rc);
+		goto cleanup;
+	}
+
 	/* set driver data before resources request it */
 	platform_set_drvdata(pdev, chip);
 
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 3f26e5e..57a85de 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -22,6 +22,7 @@
 #include "smb-lib.h"
 #include "smb-reg.h"
 #include "battery.h"
+#include "step-chg-jeita.h"
 #include "storm-watch.h"
 
 #define smblib_err(chg, fmt, ...)		\
@@ -101,35 +102,6 @@
 	return rc;
 }
 
-static int smblib_get_step_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
-{
-	int rc, step_state;
-	u8 stat;
-
-	if (!chg->step_chg_enabled) {
-		*cc_delta_ua = 0;
-		return 0;
-	}
-
-	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	step_state = (stat & STEP_CHARGING_STATUS_MASK) >>
-				STEP_CHARGING_STATUS_SHIFT;
-	rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
-				     cc_delta_ua);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
 static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
 {
 	int rc, cc_minus_ua;
@@ -148,7 +120,7 @@
 	}
 
 	rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp,
-				     &cc_minus_ua);
+					&cc_minus_ua);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n", rc);
 		return rc;
@@ -401,31 +373,17 @@
 	return rc;
 }
 
-static int step_charge_soc_update(struct smb_charger *chg, int capacity)
-{
-	int rc = 0;
-
-	rc = smblib_set_charge_param(chg, &chg->param.step_soc, capacity);
-	if (rc < 0) {
-		smblib_err(chg, "Error in updating soc, rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = smblib_write(chg, STEP_CHG_SOC_VBATT_V_UPDATE_REG,
-			STEP_CHG_SOC_VBATT_V_UPDATE_BIT);
-	if (rc < 0) {
-		smblib_err(chg,
-			"Couldn't set STEP_CHG_SOC_VBATT_V_UPDATE_REG rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	return rc;
-}
-
 int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
 {
 	int rc = 0;
+	int irq = chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq;
+
+	if (suspend && irq) {
+		if (chg->usb_icl_change_irq_enabled) {
+			disable_irq_nosync(irq);
+			chg->usb_icl_change_irq_enabled = false;
+		}
+	}
 
 	rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
 				 suspend ? USBIN_SUSPEND_BIT : 0);
@@ -433,6 +391,13 @@
 		smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
 			suspend ? "suspend" : "resume", rc);
 
+	if (!suspend && irq) {
+		if (!chg->usb_icl_change_irq_enabled) {
+			enable_irq(irq);
+			chg->usb_icl_change_irq_enabled = true;
+		}
+	}
+
 	return rc;
 }
 
@@ -522,6 +487,45 @@
 /********************
  * HELPER FUNCTIONS *
  ********************/
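+/* Lazily fetch the "dpdm" regulator and enable or disable it as requested */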
+static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
+{
+	int rc = 0;
+
+	/* fetch the DPDM regulator */
+	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+				"dpdm-supply", NULL)) {
+		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+		if (IS_ERR(chg->dpdm_reg)) {
+			rc = PTR_ERR(chg->dpdm_reg);
+			smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n",
+					rc);
+			chg->dpdm_reg = NULL;
+			return rc;
+		}
+	}
+
+	if (enable) {
+		if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+			rc = regulator_enable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't enable dpdm regulator rc=%d\n",
+					rc);
+		}
+	} else {
+		if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+			smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+			rc = regulator_disable(chg->dpdm_reg);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't disable dpdm regulator rc=%d\n",
+					rc);
+		}
+	}
+
+	return rc;
+}
 
 static void smblib_rerun_apsd(struct smb_charger *chg)
 {
@@ -548,10 +552,17 @@
 	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
 
 	/* if PD is active, APSD is disabled so won't have a valid result */
-	if (chg->pd_active)
+	if (chg->pd_active) {
 		chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
-	else
+	} else {
+		/*
+		 * Update the real charger type only if it is not a FLOAT
+		 * charger detected as SDP
+		 */
+		if (!(apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+			chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
 		chg->real_charger_type = apsd_result->pst;
+	}
 
 	smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
 					apsd_result->name, chg->pd_active);
@@ -634,13 +645,9 @@
 
 	cancel_delayed_work_sync(&chg->pl_enable_work);
 
-	if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
-		smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
-		rc = regulator_disable(chg->dpdm_reg);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
-				rc);
-	}
+	rc = smblib_request_dpdm(chg, false);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
 
 	if (chg->wa_flags & BOOST_BACK_WA) {
 		data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -732,24 +739,9 @@
 	if (!val.intval)
 		return 0;
 
-	/* fetch the DPDM regulator */
-	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
-						"dpdm-supply", NULL)) {
-		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
-		if (IS_ERR(chg->dpdm_reg)) {
-			smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
-				PTR_ERR(chg->dpdm_reg));
-			chg->dpdm_reg = NULL;
-		}
-	}
-
-	if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
-		smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
-		rc = regulator_enable(chg->dpdm_reg);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
-				rc);
-	}
+	rc = smblib_request_dpdm(chg, true);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc);
 
 	chg->uusb_apsd_rerun_done = true;
 	smblib_rerun_apsd(chg);
@@ -819,6 +811,7 @@
 {
 	int rc;
 	u8 icl_options;
+	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
 
 	/* power source is SDP */
 	switch (icl_ua) {
@@ -843,6 +836,21 @@
 		return -EINVAL;
 	}
 
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+		apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		/*
+		 * change the float charger configuration to SDP, if this
+		 * is the case of SDP being detected as FLOAT
+		 */
+		rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+			FORCE_FLOAT_SDP_CFG_BIT, FORCE_FLOAT_SDP_CFG_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set float ICL options rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+
 	rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
 		CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
 	if (rc < 0) {
@@ -884,7 +892,6 @@
 	if (icl_ua < USBIN_25MA)
 		return smblib_set_usb_suspend(chg, true);
 
-	disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
 	if (icl_ua == INT_MAX)
 		goto override_suspend_config;
 
@@ -942,7 +949,6 @@
 	}
 
 enable_icl_changed_interrupt:
-	enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
 	return rc;
 }
 
@@ -1318,11 +1324,84 @@
 #define MAX_RETRY		15
 #define MIN_DELAY_US		2000
 #define MAX_DELAY_US		9000
+static int otg_current[] = {250000, 500000, 1000000, 1500000};
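+/*
+ * OTG enable workaround: retry with increasing current limits until boost
+ * soft-start completes, then restore the configured OTG current limit
+ */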
+static int smblib_enable_otg_wa(struct smb_charger *chg)
+{
+	u8 stat;
+	int rc, i, retry_count = 0, min_delay = MIN_DELAY_US;
+
+	for (i = 0; i < ARRAY_SIZE(otg_current); i++) {
+		smblib_dbg(chg, PR_OTG, "enabling OTG with %duA\n",
+						otg_current[i]);
+		rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+						otg_current[i]);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't set otg limit rc=%d\n", rc);
+			return rc;
+		}
+
+		rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+			return rc;
+		}
+
+		retry_count = 0;
+		min_delay = MIN_DELAY_US;
+		do {
+			usleep_range(min_delay, min_delay + 100);
+			rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't read OTG status rc=%d\n",
+							rc);
+				goto out;
+			}
+
+			if (stat & BOOST_SOFTSTART_DONE_BIT) {
+				rc = smblib_set_charge_param(chg,
+					&chg->param.otg_cl, chg->otg_cl_ua);
+				if (rc < 0) {
+					smblib_err(chg, "Couldn't set otg limit rc=%d\n",
+							rc);
+					goto out;
+				}
+				break;
+			}
+			/* increase the delay for following iterations */
+			if (retry_count > 5)
+				min_delay = MAX_DELAY_US;
+
+		} while (retry_count++ < MAX_RETRY);
+
+		if (retry_count >= MAX_RETRY) {
+			smblib_dbg(chg, PR_OTG, "OTG enable failed with %duA\n",
+								otg_current[i]);
+			rc = smblib_write(chg, CMD_OTG_REG, 0);
+			if (rc < 0) {
+				smblib_err(chg, "Couldn't disable OTG rc=%d\n", rc);
+				goto out;
+			}
+		} else {
+			smblib_dbg(chg, PR_OTG, "OTG enabled\n");
+			return 0;
+		}
+	}
+
+	if (i == ARRAY_SIZE(otg_current)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	return 0;
+out:
+	smblib_write(chg, CMD_OTG_REG, 0);
+	return rc;
+}
+
 static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
 {
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
-	int rc, retry_count = 0, min_delay = MIN_DELAY_US;
-	u8 stat;
+	int rc;
 
 	smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
 	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
@@ -1335,48 +1414,17 @@
 	}
 
 	smblib_dbg(chg, PR_OTG, "enabling OTG\n");
-	rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't enable OTG regulator rc=%d\n", rc);
-		return rc;
-	}
 
 	if (chg->wa_flags & OTG_WA) {
-		/* check for softstart */
-		do {
-			usleep_range(min_delay, min_delay + 100);
-			rc = smblib_read(chg, OTG_STATUS_REG, &stat);
-			if (rc < 0) {
-				smblib_err(chg,
-					"Couldn't read OTG status rc=%d\n",
-					rc);
-				goto out;
-			}
-
-			if (stat & BOOST_SOFTSTART_DONE_BIT) {
-				rc = smblib_set_charge_param(chg,
-					&chg->param.otg_cl, chg->otg_cl_ua);
-				if (rc < 0)
-					smblib_err(chg,
-						"Couldn't set otg limit\n");
-				break;
-			}
-
-			/* increase the delay for following iterations */
-			if (retry_count > 5)
-				min_delay = MAX_DELAY_US;
-		} while (retry_count++ < MAX_RETRY);
-
-		if (retry_count >= MAX_RETRY) {
-			smblib_dbg(chg, PR_OTG, "Boost Softstart not done\n");
-			goto out;
-		}
+		rc = smblib_enable_otg_wa(chg);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+	} else {
+		rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
 	}
 
-	return 0;
-out:
-	/* disable OTG if softstart failed */
-	smblib_write(chg, CMD_OTG_REG, 0);
 	return rc;
 }
 
@@ -1755,30 +1803,6 @@
 	return rc;
 }
 
-int smblib_get_prop_step_chg_step(struct smb_charger *chg,
-				union power_supply_propval *val)
-{
-	int rc;
-	u8 stat;
-
-	if (!chg->step_chg_enabled) {
-		val->intval = -1;
-		return 0;
-	}
-
-	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
-			rc);
-		return rc;
-	}
-
-	val->intval = (stat & STEP_CHARGING_STATUS_MASK) >>
-				STEP_CHARGING_STATUS_SHIFT;
-
-	return rc;
-}
-
 int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
 					union power_supply_propval *val)
 {
@@ -2427,6 +2451,31 @@
 	return 0;
 }
 
+#define SDP_CURRENT_UA			500000
+#define CDP_CURRENT_UA			1500000
+#define DCP_CURRENT_UA			1500000
+#define HVDCP_CURRENT_UA		3000000
+#define TYPEC_DEFAULT_CURRENT_UA	900000
+#define TYPEC_MEDIUM_CURRENT_UA		1500000
+#define TYPEC_HIGH_CURRENT_UA		3000000
+static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
+{
+	int rp_ua;
+
+	switch (typec_mode) {
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		rp_ua = TYPEC_HIGH_CURRENT_UA;
+		break;
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+	/* fall through */
+	default:
+		rp_ua = DCP_CURRENT_UA;
+	}
+
+	return rp_ua;
+}
+
 /*******************
  * USB PSY SETTERS *
  * *****************/
@@ -2444,14 +2493,54 @@
 	return rc;
 }
 
+static int smblib_handle_usb_current(struct smb_charger *chg,
+					int usb_current)
+{
+	int rc = 0, rp_ua, typec_mode;
+
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+		if (usb_current == -ETIMEDOUT) {
+			/*
+			 * Valid FLOAT charger, report the current based
+			 * on Rp
+			 */
+			typec_mode = smblib_get_prop_typec_mode(chg);
+			rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+			rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+								true, rp_ua);
+			if (rc < 0)
+				return rc;
+		} else {
+			/*
+			 * FLOAT charger detected as SDP by USB driver,
+			 * charge with the requested current and update the
+			 * real_charger_type
+			 */
+			chg->real_charger_type = POWER_SUPPLY_TYPE_USB;
+			rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+						true, usb_current);
+			if (rc < 0)
+				return rc;
+			rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+							false, 0);
+			if (rc < 0)
+				return rc;
+		}
+	} else {
+		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+					true, usb_current);
+	}
+
+	return rc;
+}
+
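+
The ICL handling above goes through the pmic-voter framework: several clients (USB_PSY_VOTER, LEGACY_UNKNOWN_VOTER, ...) each cast a vote on the input current limit and the framework applies the effective result. As a rough, self-contained illustration of that idea only — this is a toy model, not the actual pmic-voter API — here is a votable where the effective value is assumed to be the minimum enabled vote:

#include <limits.h>
#include <stdbool.h>
#include <string.h>

#define MAX_CLIENTS	8

struct votable {
	const char *client[MAX_CLIENTS];
	bool enabled[MAX_CLIENTS];
	int value[MAX_CLIENTS];
	int nclients;
};

/* Record (or update) one client's vote. */
static int vote(struct votable *v, const char *client, bool enabled, int value)
{
	int i;

	for (i = 0; i < v->nclients; i++)
		if (!strcmp(v->client[i], client))
			break;
	if (i == v->nclients) {
		if (v->nclients == MAX_CLIENTS)
			return -1;
		v->client[v->nclients++] = client;
	}
	v->enabled[i] = enabled;
	v->value[i] = value;
	return 0;
}

/* Effective limit: the smallest value among enabled votes, or -1 if none. */
static int effective_vote(const struct votable *v)
{
	int i, eff = INT_MAX;

	for (i = 0; i < v->nclients; i++)
		if (v->enabled[i] && v->value[i] < eff)
			eff = v->value[i];
	return eff == INT_MAX ? -1 : eff;
}

In this toy model, the FLOAT-charger path above amounts to casting a LEGACY_UNKNOWN_VOTER vote at the Rp-derived current, or moving the vote to USB_PSY_VOTER once the USB stack has enumerated the device as an SDP.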
 int smblib_set_prop_usb_current_max(struct smb_charger *chg,
 				    const union power_supply_propval *val)
 {
 	int rc = 0;
 
 	if (!chg->pd_active) {
-		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
-				true, val->intval);
+		rc = smblib_handle_usb_current(chg, val->intval);
 	} else if (chg->system_suspend_supported) {
 		if (val->intval <= USBIN_25MA)
 			rc = vote(chg->usb_icl_votable,
@@ -2834,46 +2923,72 @@
 	return rc;
 }
 
+static int smblib_recover_from_soft_jeita(struct smb_charger *chg)
+{
+	u8 stat_1, stat_2;
+	int rc;
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat_1);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat_2);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	if ((chg->jeita_status && !(stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK) &&
+		((stat_1 & BATTERY_CHARGER_STATUS_MASK) == TERMINATE_CHARGE))) {
+		/*
+		 * We are moving from JEITA soft -> Normal and charging
+		 * is terminated
+		 */
+		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG, 0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't disable charging rc=%d\n",
+						rc);
+			return rc;
+		}
+		rc = smblib_write(chg, CHARGING_ENABLE_CMD_REG,
+						CHARGING_ENABLE_CMD_BIT);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't enable charging rc=%d\n",
+						rc);
+			return rc;
+		}
+	}
+
+	chg->jeita_status = stat_2 & BAT_TEMP_STATUS_SOFT_LIMIT_MASK;
+
+	return 0;
+}
+
 /************************
  * USB MAIN PSY GETTERS *
  ************************/
 int smblib_get_prop_fcc_delta(struct smb_charger *chg,
-			       union power_supply_propval *val)
+				union power_supply_propval *val)
 {
-	int rc, jeita_cc_delta_ua, step_cc_delta_ua, hw_cc_delta_ua = 0;
-
-	rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
-		step_cc_delta_ua = 0;
-	} else {
-		hw_cc_delta_ua = step_cc_delta_ua;
-	}
+	int rc, jeita_cc_delta_ua = 0;
 
 	rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
 		jeita_cc_delta_ua = 0;
-	} else if (jeita_cc_delta_ua < 0) {
-		/* HW will take the min between JEITA and step charge */
-		hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
 	}
 
-	val->intval = hw_cc_delta_ua;
+	val->intval = jeita_cc_delta_ua;
 	return 0;
 }
 
 /************************
  * USB MAIN PSY SETTERS *
  ************************/
-
-#define SDP_CURRENT_UA			500000
-#define CDP_CURRENT_UA			1500000
-#define DCP_CURRENT_UA			1500000
-#define HVDCP_CURRENT_UA		3000000
-#define TYPEC_DEFAULT_CURRENT_UA	900000
-#define TYPEC_MEDIUM_CURRENT_UA		1500000
-#define TYPEC_HIGH_CURRENT_UA		3000000
 int smblib_get_charge_current(struct smb_charger *chg,
 				int *total_current_ua)
 {
@@ -3040,61 +3155,18 @@
 	return IRQ_HANDLED;
 }
 
-irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data)
-{
-	struct smb_irq_data *irq_data = data;
-	struct smb_charger *chg = irq_data->parent_data;
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
-	if (chg->step_chg_enabled)
-		rerun_election(chg->fcc_votable);
-
-	return IRQ_HANDLED;
-}
-
-irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data)
-{
-	struct smb_irq_data *irq_data = data;
-	struct smb_charger *chg = irq_data->parent_data;
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
-	if (chg->step_chg_enabled)
-		rerun_election(chg->fcc_votable);
-
-	return IRQ_HANDLED;
-}
-
-#define STEP_SOC_REQ_MS	3000
-irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data)
-{
-	struct smb_irq_data *irq_data = data;
-	struct smb_charger *chg = irq_data->parent_data;
-	int rc;
-	union power_supply_propval pval = {0, };
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
-
-	if (!chg->bms_psy) {
-		schedule_delayed_work(&chg->step_soc_req_work,
-				      msecs_to_jiffies(STEP_SOC_REQ_MS));
-		return IRQ_HANDLED;
-	}
-
-	rc = smblib_get_prop_batt_capacity(chg, &pval);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
-	else
-		step_charge_soc_update(chg, pval.intval);
-
-	return IRQ_HANDLED;
-}
-
 irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
 	struct smb_charger *chg = irq_data->parent_data;
+	int rc;
+
+	rc = smblib_recover_from_soft_jeita(chg);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't recover chg from soft jeita rc=%d\n",
+				rc);
+		return IRQ_HANDLED;
+	}
 
 	rerun_election(chg->fcc_votable);
 	power_supply_changed(chg->batt_psy);
@@ -3207,25 +3279,10 @@
 	smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
 						chg->chg_freq.freq_removal);
 
-	/* fetch the DPDM regulator */
-	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
-						"dpdm-supply", NULL)) {
-		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
-		if (IS_ERR(chg->dpdm_reg)) {
-			smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
-				PTR_ERR(chg->dpdm_reg));
-			chg->dpdm_reg = NULL;
-		}
-	}
-
 	if (vbus_rising) {
-		if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
-			smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
-			rc = regulator_enable(chg->dpdm_reg);
-			if (rc < 0)
-				smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
-					rc);
-		}
+		rc = smblib_request_dpdm(chg, true);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc);
 
 		/* Schedule work to enable parallel charger */
 		vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
@@ -3245,13 +3302,9 @@
 			}
 		}
 
-		if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
-			smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
-			rc = regulator_disable(chg->dpdm_reg);
-			if (rc < 0)
-				smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
-					rc);
-		}
+		rc = smblib_request_dpdm(chg, false);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
 	}
 
 	if (chg->micro_usb_mode)
@@ -3472,24 +3525,6 @@
 		   rising ? "rising" : "falling");
 }
 
-static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
-{
-	int rp_ua;
-
-	switch (typec_mode) {
-	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
-		rp_ua = TYPEC_HIGH_CURRENT_UA;
-		break;
-	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
-	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
-	/* fall through */
-	default:
-		rp_ua = DCP_CURRENT_UA;
-	}
-
-	return rp_ua;
-}
-
 static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
 {
 	int typec_mode;
@@ -3515,11 +3550,17 @@
 		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
 		break;
 	case POWER_SUPPLY_TYPE_USB_DCP:
-	case POWER_SUPPLY_TYPE_USB_FLOAT:
 		typec_mode = smblib_get_prop_typec_mode(chg);
 		rp_ua = get_rp_based_dcp_current(chg, typec_mode);
 		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua);
 		break;
+	case POWER_SUPPLY_TYPE_USB_FLOAT:
+		/*
+		 * limit ICL to 100mA; the USB driver will enumerate to check
+		 * if this is an SDP and set the current appropriately
+		 */
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
+		break;
 	case POWER_SUPPLY_TYPE_USB_HVDCP:
 	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
 		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
@@ -3655,13 +3696,9 @@
 
 	chg->cc2_detach_wa_active = false;
 
-	if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
-		smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
-		rc = regulator_disable(chg->dpdm_reg);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
-				rc);
-	}
+	rc = smblib_request_dpdm(chg, false);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
 
 	if (chg->wa_flags & BOOST_BACK_WA) {
 		data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -3720,6 +3757,13 @@
 	chg->pd_hard_reset = 0;
 	chg->typec_legacy_valid = false;
 
+	/* write back the default FLOAT charger configuration */
+	rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+				(u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't write float charger options rc=%d\n",
+			rc);
+
 	/* reset back to 120mS tCC debounce */
 	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
 	if (rc < 0)
@@ -3799,10 +3843,14 @@
 		smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
 									rc);
 
-	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
+	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) {
 		typec_sink_insertion(chg);
-	else
+	} else {
+		rc = smblib_request_dpdm(chg, true);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc);
 		typec_sink_removal(chg);
+	}
 }
 
 static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
@@ -3815,6 +3863,24 @@
 		return;
 
 	/*
+	 * if APSD indicates FLOAT and the USB stack has detected SDP,
+	 * do not respond to Rp changes, as we have not confirmed that
+	 * it is a legacy cable
+	 */
+	if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+		return;
+	/*
+	 * We want the ICL vote @ 100mA for a FLOAT charger
+	 * until the detection by the USB stack is complete.
+	 * Ignore the Rp changes unless there is a
+	 * pre-existing valid vote.
+	 */
+	if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+		get_client_vote(chg->usb_icl_votable,
+			LEGACY_UNKNOWN_VOTER) <= 100000)
+		return;
+
+	/*
 	 * handle Rp change for DCP/FLOAT/OCP.
 	 * Update the current only if the Rp is different from
 	 * the last Rp value.
@@ -4011,10 +4077,15 @@
 	struct smb_charger *chg = irq_data->parent_data;
 	int rc;
 
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
 	rc = smblib_write(chg, BARK_BITE_WDOG_PET_REG, BARK_BITE_WDOG_PET_BIT);
 	if (rc < 0)
 		smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
 
+	if (chg->step_chg_enabled)
+		power_supply_changed(chg->batt_psy);
+
 	return IRQ_HANDLED;
 }
 
@@ -4097,22 +4168,6 @@
 		power_supply_changed(chg->batt_psy);
 }
 
-static void step_soc_req_work(struct work_struct *work)
-{
-	struct smb_charger *chg = container_of(work, struct smb_charger,
-						step_soc_req_work.work);
-	union power_supply_propval pval = {0, };
-	int rc;
-
-	rc = smblib_get_prop_batt_capacity(chg, &pval);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
-		return;
-	}
-
-	step_charge_soc_update(chg, pval.intval);
-}
-
 static void clear_hdc_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -4645,7 +4700,6 @@
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
 	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
 	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
-	INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
 	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
 	INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
 	INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
@@ -4667,6 +4721,13 @@
 			return rc;
 		}
 
+		rc = qcom_step_chg_init(chg->step_chg_enabled);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't init qcom_step_chg rc=%d\n",
+				rc);
+			return rc;
+		}
+
 		rc = smblib_create_votables(chg);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't create votables rc=%d\n",
@@ -4701,7 +4762,6 @@
 		cancel_work_sync(&chg->bms_update_work);
 		cancel_work_sync(&chg->rdstd_cc2_detach_work);
 		cancel_delayed_work_sync(&chg->hvdcp_detect_work);
-		cancel_delayed_work_sync(&chg->step_soc_req_work);
 		cancel_delayed_work_sync(&chg->clear_hdc_work);
 		cancel_work_sync(&chg->otg_oc_work);
 		cancel_work_sync(&chg->vconn_oc_work);
@@ -4713,6 +4773,7 @@
 		cancel_delayed_work_sync(&chg->bb_removal_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblib_destroy_votables(chg);
+		qcom_step_chg_deinit();
 		qcom_batt_deinit();
 		break;
 	case PARALLEL_SLAVE:
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index c08d404..4ffbeb6 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -192,9 +192,6 @@
 	struct smb_chg_param	dc_icl_div2_mid_hv;
 	struct smb_chg_param	dc_icl_div2_hv;
 	struct smb_chg_param	jeita_cc_comp;
-	struct smb_chg_param	step_soc_threshold[4];
-	struct smb_chg_param	step_soc;
-	struct smb_chg_param	step_cc_delta[5];
 	struct smb_chg_param	freq_buck;
 	struct smb_chg_param	freq_boost;
 };
@@ -289,7 +286,6 @@
 	struct work_struct	rdstd_cc2_detach_work;
 	struct delayed_work	hvdcp_detect_work;
 	struct delayed_work	ps_change_timeout_work;
-	struct delayed_work	step_soc_req_work;
 	struct delayed_work	clear_hdc_work;
 	struct work_struct	otg_oc_work;
 	struct work_struct	vconn_oc_work;
@@ -330,6 +326,9 @@
 	int			fake_input_current_limited;
 	bool			pr_swap_in_progress;
 	int			typec_mode;
+	int			usb_icl_change_irq_enabled;
+	u32			jeita_status;
+	u8			float_cfg;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -384,9 +383,6 @@
 irqreturn_t smblib_handle_debug(int irq, void *data);
 irqreturn_t smblib_handle_otg_overcurrent(int irq, void *data);
 irqreturn_t smblib_handle_chg_state_change(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_state_change(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_soc_update_fail(int irq, void *data);
-irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data);
 irqreturn_t smblib_handle_batt_temp_changed(int irq, void *data);
 irqreturn_t smblib_handle_batt_psy_changed(int irq, void *data);
 irqreturn_t smblib_handle_usb_psy_changed(int irq, void *data);
@@ -426,9 +422,6 @@
 				union power_supply_propval *val);
 int smblib_get_prop_batt_temp(struct smb_charger *chg,
 				union power_supply_propval *val);
-int smblib_get_prop_step_chg_step(struct smb_charger *chg,
-				union power_supply_propval *val);
-
 int smblib_set_prop_input_suspend(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_set_prop_batt_capacity(struct smb_charger *chg,
@@ -508,7 +501,7 @@
 void smblib_suspend_on_debug_battery(struct smb_charger *chg);
 int smblib_rerun_apsd_if_required(struct smb_charger *chg);
 int smblib_get_prop_fcc_delta(struct smb_charger *chg,
-			       union power_supply_propval *val);
+				union power_supply_propval *val);
 int smblib_icl_override(struct smb_charger *chg, bool override);
 int smblib_dp_dm(struct smb_charger *chg, int val);
 int smblib_rerun_aicl(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index ca0a2c6..dd949e7 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -845,6 +845,13 @@
 		}
 	}
 
+	/* configure a fixed 700kHz switching freq to avoid tdie errors */
+	rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+	if (rc < 0) {
+		pr_err("Couldn't configure 700kHz switch freq rc=%d\n", rc);
+		return rc;
+	}
+
 	/* enable watchdog bark and bite interrupts, and disable the watchdog */
 	rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
 			| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
new file mode 100644
index 0000000..a2c08be
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -0,0 +1,272 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCOM-STEPCHG: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/slab.h>
+#include <linux/pmic-voter.h>
+#include "step-chg-jeita.h"
+
+#define MAX_STEP_CHG_ENTRIES	8
+#define STEP_CHG_VOTER		"STEP_CHG_VOTER"
+#define STATUS_CHANGE_VOTER	"STATUS_CHANGE_VOTER"
+
+#define is_between(left, right, value) \
+		(((left) >= (right) && (left) >= (value) \
+			&& (value) >= (right)) \
+		|| ((left) <= (right) && (left) <= (value) \
+			&& (value) <= (right)))
+
+struct step_chg_data {
+	u32 vbatt_soc_low;
+	u32 vbatt_soc_high;
+	u32 fcc_ua;
+};
+
+struct step_chg_cfg {
+	u32 psy_prop;
+	char *prop_name;
+	struct step_chg_data cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct step_chg_info {
+	ktime_t			last_update_time;
+	bool			step_chg_enable;
+
+	struct votable		*fcc_votable;
+	struct wakeup_source	*step_chg_ws;
+	struct power_supply	*batt_psy;
+	struct delayed_work	status_change_work;
+	struct notifier_block	nb;
+};
+
+static struct step_chg_info *the_chip;
+
+/*
+ * Step Charging Configuration
+ * Update the table based on the battery profile
+ * Supports VBATT and SOC based source
+ */
+static struct step_chg_cfg step_chg_config = {
+	.psy_prop  = POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	.prop_name = "VBATT",
+	.cfg	 = {
+		/* VBAT_LOW	VBAT_HIGH	FCC */
+		{3600000,	4000000,	3000000},
+		{4000000,	4200000,	2800000},
+		{4200000,	4400000,	2000000},
+	},
+/*
+ *	SOC STEP-CHG configuration example.
+ *
+ *	.psy_prop = POWER_SUPPLY_PROP_CAPACITY,
+ *	.prop_name = "SOC",
+ *	.cfg	= {
+ *		//SOC_LOW	SOC_HIGH	FCC
+ *		{20,		70,		3000000},
+ *		{70,		90,		2750000},
+ *		{90,		100,		2500000},
+ *	},
+ */
+};
+
+static bool is_batt_available(struct step_chg_info *chip)
+{
+	if (!chip->batt_psy)
+		chip->batt_psy = power_supply_get_by_name("battery");
+
+	if (!chip->batt_psy)
+		return false;
+
+	return true;
+}
+
+static int get_fcc(int threshold)
+{
+	int i;
+
+	for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
+		if (is_between(step_chg_config.cfg[i].vbatt_soc_low,
+			step_chg_config.cfg[i].vbatt_soc_high, threshold))
+			return step_chg_config.cfg[i].fcc_ua;
+
+	return -ENODATA;
+}
+
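+
To make the table lookup above concrete, here is a small standalone example that reuses the same inclusive-range check and the VBATT thresholds from step_chg_config, printing the FCC selected for a few sample voltages:

#include <stdio.h>

#define is_between(left, right, value) \
	(((left) >= (right) && (left) >= (value) && (value) >= (right)) || \
	 ((left) <= (right) && (left) <= (value) && (value) <= (right)))

struct range { int low_uv, high_uv, fcc_ua; };

static const struct range cfg[] = {
	{ 3600000, 4000000, 3000000 },
	{ 4000000, 4200000, 2800000 },
	{ 4200000, 4400000, 2000000 },
};

static int lookup_fcc(int vbatt_uv)
{
	unsigned int i;

	for (i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++)
		if (is_between(cfg[i].low_uv, cfg[i].high_uv, vbatt_uv))
			return cfg[i].fcc_ua;
	return -1;	/* no matching step: the caller drops its FCC vote */
}

int main(void)
{
	printf("%d\n", lookup_fcc(3900000));	/* 3000000 */
	printf("%d\n", lookup_fcc(4300000));	/* 2000000 */
	printf("%d\n", lookup_fcc(4500000));	/* -1 */
	return 0;
}

Note that the range check is inclusive at both ends, so a reading that lands exactly on a boundary (e.g. 4000000 uV) matches the lower step first.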
+static int handle_step_chg_config(struct step_chg_info *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc = 0, fcc_ua = 0;
+
+	rc = power_supply_get_property(chip->batt_psy,
+		POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval);
+	if (rc < 0)
+		chip->step_chg_enable = 0;
+	else
+		chip->step_chg_enable = pval.intval;
+
+	if (!chip->step_chg_enable) {
+		if (chip->fcc_votable)
+			vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+		return 0;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy,
+				step_chg_config.psy_prop, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't read %s property rc=%d\n",
+				step_chg_config.prop_name, rc);
+		return rc;
+	}
+
+	chip->fcc_votable = find_votable("FCC");
+	if (!chip->fcc_votable)
+		return -EINVAL;
+
+	fcc_ua = get_fcc(pval.intval);
+	if (fcc_ua < 0) {
+		/* remove the vote if no step-based fcc is found */
+		vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+		return 0;
+	}
+
+	vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
+
+	pr_debug("%s = %d Step-FCC = %duA\n",
+		step_chg_config.prop_name, pval.intval, fcc_ua);
+
+	return 0;
+}
+
+#define STEP_CHG_HYSTERISIS_DELAY_US		5000000 /* 5 secs */
+static void status_change_work(struct work_struct *work)
+{
+	struct step_chg_info *chip = container_of(work,
+			struct step_chg_info, status_change_work.work);
+	int rc = 0;
+	u64 elapsed_us;
+
+	elapsed_us = ktime_us_delta(ktime_get(), chip->last_update_time);
+	if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+		goto release_ws;
+
+	if (!is_batt_available(chip))
+		goto release_ws;
+
+	rc = handle_step_chg_config(chip);
+	if (rc < 0)
+		goto release_ws;
+
+	chip->last_update_time = ktime_get();
+
+release_ws:
+	__pm_relax(chip->step_chg_ws);
+}
+
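+
status_change_work() above also rate-limits itself: a notification arriving less than five seconds after the last successful update is dropped, and the wakeup source is released either way. A compact userspace sketch of that throttle, using a monotonic clock in place of ktime_get(), and simplified in that it stamps the time immediately rather than only after a successful update:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define HYSTERESIS_US	5000000ULL	/* 5 seconds, as above */

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000ULL + (uint64_t)ts.tv_nsec / 1000ULL;
}

/* Returns true when enough time has passed to re-evaluate the config. */
static bool should_update(uint64_t *last_update_us)
{
	uint64_t now = now_us();

	if (now - *last_update_us < HYSTERESIS_US)
		return false;
	*last_update_us = now;
	return true;
}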
+static int step_chg_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct step_chg_info *chip = container_of(nb, struct step_chg_info, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "battery") == 0)) {
+		__pm_stay_awake(chip->step_chg_ws);
+		schedule_delayed_work(&chip->status_change_work, 0);
+	}
+
+	return NOTIFY_OK;
+}
+
+static int step_chg_register_notifier(struct step_chg_info *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = step_chg_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+int qcom_step_chg_init(bool step_chg_enable)
+{
+	int rc;
+	struct step_chg_info *chip;
+
+	if (the_chip) {
+		pr_err("Already initialized\n");
+		return -EINVAL;
+	}
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->step_chg_ws = wakeup_source_register("qcom-step-chg");
+	if (!chip->step_chg_ws) {
+		rc = -EINVAL;
+		goto cleanup;
+	}
+
+	chip->step_chg_enable = step_chg_enable;
+
+	if (step_chg_enable && (!step_chg_config.psy_prop ||
+				!step_chg_config.prop_name)) {
+		/* fail if step-chg configuration is invalid */
+		pr_err("Step-chg configuration not defined - fail\n");
+		rc = -ENODATA;
+		goto release_wakeup_source;
+	}
+
+	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
+
+	rc = step_chg_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		goto release_wakeup_source;
+	}
+
+	the_chip = chip;
+
+	if (step_chg_enable)
+		pr_info("Step charging enabled. Using %s source\n",
+				step_chg_config.prop_name);
+
+	return 0;
+
+release_wakeup_source:
+	wakeup_source_unregister(chip->step_chg_ws);
+cleanup:
+	kfree(chip);
+	return rc;
+}
+
+void qcom_step_chg_deinit(void)
+{
+	struct step_chg_info *chip = the_chip;
+
+	if (!chip)
+		return;
+
+	cancel_delayed_work_sync(&chip->status_change_work);
+	power_supply_unreg_notifier(&chip->nb);
+	wakeup_source_unregister(chip->step_chg_ws);
+	the_chip = NULL;
+	kfree(chip);
+}
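+
A minimal caller sketch of this module's lifecycle, mirroring how smb-lib.c wires it in above; the charger_probe/charger_remove names are illustrative only:

#include "step-chg-jeita.h"

/* Hypothetical charger probe/remove; only the step-chg wiring is shown. */
static int charger_probe(bool step_chg_enabled)
{
	int rc;

	rc = qcom_step_chg_init(step_chg_enabled);
	if (rc < 0)
		return rc;	/* -EINVAL if already initialized, -ENOMEM, ... */

	/* ... rest of charger initialization ... */
	return 0;
}

static void charger_remove(void)
{
	/* ... tear down charger resources ... */
	qcom_step_chg_deinit();	/* safe to call even if init never ran */
}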
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
new file mode 100644
index 0000000..236877a
--- /dev/null
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STEP_CHG_H__
+#define __STEP_CHG_H__
+int qcom_step_chg_init(bool step_chg_enable);
+void qcom_step_chg_deinit(void);
+#endif /* __STEP_CHG_H__ */
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index b1e6a3b..9510016 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -1316,6 +1316,27 @@
 static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl);
 
 /**
+ * cpr3_regulator_cprh_initialized() - checks if CPRh has already been
+ *		initialized by the boot loader
+ * @ctrl:		Pointer to the CPR3 controller
+ *
+ * Return: true if CPRh controller is already initialized else false
+ */
+static bool cpr3_regulator_cprh_initialized(struct cpr3_controller *ctrl)
+{
+	u32 reg;
+
+	if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPRH)
+		return false;
+
+	ctrl->cpr_hw_version = readl_relaxed(ctrl->cpr_ctrl_base
+						+ CPR3_REG_CPR_VERSION);
+	reg = readl_relaxed(ctrl->cpr_ctrl_base + CPRH_REG_CTL(ctrl));
+
+	return reg & CPRH_CTL_OSM_ENABLED;
+}
+
+/**
  * cpr3_regulator_init_cprh() - performs hardware initialization at the
  *		controller and thread level required for CPRh operation.
  * @ctrl:		Pointer to the CPR3 controller
@@ -6459,6 +6480,11 @@
 	}
 	ctrl->cpr_ctrl_base = devm_ioremap(dev, res->start, resource_size(res));
 
+	if (cpr3_regulator_cprh_initialized(ctrl)) {
+		cpr3_err(ctrl, "CPRh controller already initialized by boot loader\n");
+		return -EPERM;
+	}
+
 	if (ctrl->aging_possible_mask) {
 		/*
 		 * Aging possible register address is required if an aging
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index deb0ce5..c393940 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -38,12 +38,10 @@
 #define SDM660_KBSS_FUSE_CORNERS			5
 
 #define SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS		4
-#define SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS	3
-/*
- * This must be set to the larger of SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS and
- * SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS values.
- */
-#define SDM845_KBSS_MAX_FUSE_CORNERS			4
+#define SDM845_V1_KBSS_PERF_CLUSTER_FUSE_CORNERS	3
+#define SDM845_V2_KBSS_PERF_CLUSTER_FUSE_CORNERS	5
+/* This must be set to the largest of SDM845 FUSE_CORNERS values. */
+#define SDM845_KBSS_MAX_FUSE_CORNERS			5
 
 /**
  * struct cprh_kbss_fuses - KBSS specific fuse data
@@ -153,18 +151,38 @@
 #define CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID	0
 
 static const char * const
-cprh_sdm845_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+cprh_sdm845_v1_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		"LowSVS",
 		"SVS_L1",
 		"NOM_L1",
 		"TURBO",
+		"",
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
 		"SVS",
 		"NOM",
 		"TURBO_L2",
 		"",
+		"",
+	},
+};
+
+static const char * const
+cprh_sdm845_v2_kbss_fuse_corner_name[2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		"LowSVS",
+		"SVS_L1",
+		"NOM",
+		"TURBO",
+		"",
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		"LowSVS",
+		"SVS",
+		"NOM_L1",
+		"TURBO_L2",
+		"BINNING",
 	},
 };
 
@@ -334,7 +352,7 @@
  *		 different fuse rows.
  */
 static const struct cpr3_fuse_param
-sdm845_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v1_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{66, 52, 55}, {} },
@@ -359,7 +377,34 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v2_kbss_ro_sel_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			{{66, 52, 55}, {} },
+			{{66, 48, 51}, {} },
+			{{66, 44, 47}, {} },
+			{{66, 40, 43}, {} },
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			{{66, 52, 55}, {} },
+			{{66, 48, 51}, {} },
+			{{66, 44, 47}, {} },
+			{{66, 40, 43}, {} },
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			{{73,  5,  8}, {} },
+			{{70, 12, 15}, {} },
+			{{70,  8, 11}, {} },
+			{{70,  4,  7}, {} },
+			{{70,  0,  3}, {} },
+		},
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{67, 10, 15}, {} },
@@ -384,7 +429,34 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+sdm845_v2_kbss_init_voltage_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			{{67, 10, 15}, {} },
+			{{67,  4,  9}, {} },
+			{{66, 62, 63}, {67,  0,  3}, {} },
+			{{66, 56, 61}, {} },
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			{{68, 50, 55}, {} },
+			{{68, 44, 49}, {} },
+			{{68, 38, 43}, {} },
+			{{68, 32, 37}, {} },
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			{{72, 10, 15}, {} },
+			{{70, 34, 39}, {} },
+			{{70, 28, 33}, {} },
+			{{70, 22, 27}, {} },
+			{{70, 16, 21}, {} },
+		},
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{67, 52, 63}, {} },
@@ -409,7 +481,34 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
+sdm845_v2_kbss_target_quot_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][3] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			{{67, 52, 63}, {} },
+			{{67, 40, 51}, {} },
+			{{67, 28, 39}, {} },
+			{{67, 16, 27}, {} },
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			{{69, 28, 39}, {} },
+			{{69, 16, 27}, {} },
+			{{69,  4, 15}, {} },
+			{{68, 56, 63}, {69, 0, 3}, {} },
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			{{72, 16, 27}, {} },
+			{{71, 12, 23}, {} },
+			{{71,  0, 11}, {} },
+			{{70, 52, 63}, {} },
+			{{70, 40, 51}, {} },
+		},
+	},
+};
+
+static const struct cpr3_fuse_param
+sdm845_v1_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			{{} },
@@ -433,6 +532,33 @@
 	},
 };
 
+static const struct cpr3_fuse_param
+sdm845_v2_kbss_quot_offset_param[2][2][SDM845_KBSS_MAX_FUSE_CORNERS][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			{{} },
+			{{68, 16, 23}, {} },
+			{{68,  8, 15}, {} },
+			{{68,  0,  7}, {} },
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			{{} },
+			{{69, 56, 63}, {} },
+			{{69, 48, 55}, {} },
+			{{69, 40, 47}, {} },
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			{{} },
+			{{72, 28, 35}, {} },
+			{{71, 40, 47}, {} },
+			{{71, 32, 39}, {} },
+			{{71, 24, 31}, {} },
+		},
+	},
+};
+
 static const struct cpr3_fuse_param msm8998_cpr_fusing_rev_param[] = {
 	{39, 51, 53},
 	{},
@@ -443,11 +569,16 @@
 	{},
 };
 
-static const struct cpr3_fuse_param sdm845_cpr_fusing_rev_param[] = {
+static const struct cpr3_fuse_param sdm845_v1_cpr_fusing_rev_param[] = {
 	{73, 3, 5},
 	{},
 };
 
+static const struct cpr3_fuse_param sdm845_v2_cpr_fusing_rev_param[] = {
+	{75, 34, 36},
+	{},
+};
+
 static const struct cpr3_fuse_param kbss_speed_bin_param[] = {
 	{38, 29, 31},
 	{},
@@ -490,7 +621,7 @@
 };
 
 static const struct cpr3_fuse_param
-sdm845_kbss_aging_init_quot_diff_param[2][2] = {
+sdm845_v1_kbss_aging_init_quot_diff_param[2][2] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		{68, 21, 28},
 		{},
@@ -501,6 +632,18 @@
 	},
 };
 
+static const struct cpr3_fuse_param
+sdm845_v2_kbss_aging_init_quot_diff_param[2][2] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		{68, 24, 31},
+		{},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		{71, 48, 55},
+		{},
+	},
+};
+
 /*
  * Open loop voltage fuse reference voltages in microvolts for MSM8998 v1
  */
@@ -556,7 +699,7 @@
  * Open loop voltage fuse reference voltages in microvolts for SDM845
  */
 static const int
-sdm845_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+sdm845_v1_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
 	[CPRH_KBSS_POWER_CLUSTER_ID] = {
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			688000,
@@ -580,6 +723,33 @@
 	},
 };
 
+static const int
+sdm845_v2_kbss_fuse_ref_volt[2][2][SDM845_KBSS_MAX_FUSE_CORNERS] = {
+	[CPRH_KBSS_POWER_CLUSTER_ID] = {
+		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
+			688000,
+			812000,
+			828000,
+			952000,
+		},
+		[CPRH_KBSS_L3_THREAD_ID] = {
+			688000,
+			812000,
+			828000,
+			952000,
+		},
+	},
+	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+			 688000,
+			 812000,
+			 884000,
+			1000000,
+			1000000,
+		},
+	},
+};
+
 #define CPRH_KBSS_FUSE_STEP_VOLT		10000
 #define CPRH_SDM845_KBSS_FUSE_STEP_VOLT		8000
 #define CPRH_KBSS_VOLTAGE_FUSE_SIZE		6
@@ -880,9 +1050,11 @@
 		struct cprh_kbss_fuses *fuse)
 {
 	void __iomem *base = vreg->thread->ctrl->fuse_base;
+	bool is_v1 = (vreg->thread->ctrl->soc_revision == SDM845_V1_SOC_ID);
 	int i, cid, tid, rc;
 
-	rc = cpr3_read_fuse_param(base, sdm845_cpr_fusing_rev_param,
+	rc = cpr3_read_fuse_param(base, is_v1 ? sdm845_v1_cpr_fusing_rev_param
+					      : sdm845_v2_cpr_fusing_rev_param,
 				&fuse->cpr_fusing_rev);
 	if (rc) {
 		cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
@@ -895,8 +1067,9 @@
 	cid = vreg->thread->ctrl->ctrl_id;
 
 	for (i = 0; i < vreg->fuse_corner_count; i++) {
-		rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_init_voltage_param[cid][tid][i],
+		rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_init_voltage_param[cid][tid][i] :
+				sdm845_v2_kbss_init_voltage_param[cid][tid][i],
 				&fuse->init_voltage[i]);
 		if (rc) {
 			cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
@@ -904,8 +1077,9 @@
 			return rc;
 		}
 
-		rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_target_quot_param[cid][tid][i],
+		rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_target_quot_param[cid][tid][i] :
+				sdm845_v2_kbss_target_quot_param[cid][tid][i],
 				&fuse->target_quot[i]);
 		if (rc) {
 			cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
@@ -913,8 +1087,9 @@
 			return rc;
 		}
 
-		rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_ro_sel_param[cid][tid][i],
+		rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_ro_sel_param[cid][tid][i] :
+				sdm845_v2_kbss_ro_sel_param[cid][tid][i],
 				&fuse->ro_sel[i]);
 		if (rc) {
 			cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
@@ -922,8 +1097,9 @@
 			return rc;
 		}
 
-		rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_quot_offset_param[cid][tid][i],
+		rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_quot_offset_param[cid][tid][i] :
+				sdm845_v2_kbss_quot_offset_param[cid][tid][i],
 				&fuse->quot_offset[i]);
 		if (rc) {
 			cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
@@ -932,8 +1108,9 @@
 		}
 	}
 
-	rc = cpr3_read_fuse_param(base,
-				sdm845_kbss_aging_init_quot_diff_param[cid],
+	rc = cpr3_read_fuse_param(base, is_v1 ?
+				sdm845_v1_kbss_aging_init_quot_diff_param[cid] :
+				sdm845_v2_kbss_aging_init_quot_diff_param[cid],
 				&fuse->aging_init_quot_diff);
 	if (rc) {
 		cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
@@ -994,11 +1171,16 @@
 		fuse_corners = MSM8998_KBSS_FUSE_CORNERS;
 		break;
 	case SDM845_V1_SOC_ID:
+		fuse_corners = vreg->thread->ctrl->ctrl_id
+					== CPRH_KBSS_POWER_CLUSTER_ID
+				? SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS
+				: SDM845_V1_KBSS_PERF_CLUSTER_FUSE_CORNERS;
+		break;
 	case SDM845_V2_SOC_ID:
 		fuse_corners = vreg->thread->ctrl->ctrl_id
 					== CPRH_KBSS_POWER_CLUSTER_ID
 				? SDM845_KBSS_POWER_CLUSTER_FUSE_CORNERS
-				: SDM845_KBSS_PERFORMANCE_CLUSTER_FUSE_CORNERS;
+				: SDM845_V2_KBSS_PERF_CLUSTER_FUSE_CORNERS;
 		break;
 	default:
 		cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
@@ -1156,10 +1338,14 @@
 		corner_name = cprh_msm8998_kbss_fuse_corner_name;
 		break;
 	case SDM845_V1_SOC_ID:
+		tid = cprh_kbss_get_thread_id(vreg->thread);
+		ref_volt = sdm845_v1_kbss_fuse_ref_volt[id][tid];
+		corner_name = cprh_sdm845_v1_kbss_fuse_corner_name[id];
+		break;
 	case SDM845_V2_SOC_ID:
 		tid = cprh_kbss_get_thread_id(vreg->thread);
-		ref_volt = sdm845_kbss_fuse_ref_volt[id][tid];
-		corner_name = cprh_sdm845_kbss_fuse_corner_name[id];
+		ref_volt = sdm845_v2_kbss_fuse_ref_volt[id][tid];
+		corner_name = cprh_sdm845_v2_kbss_fuse_corner_name[id];
 		break;
 	default:
 		cpr3_err(vreg, "unsupported soc id = %d\n", soc_revision);
@@ -1744,8 +1930,13 @@
 			CPRH_MSM8998_KBSS_FUSE_CORNER_TURBO_L1;
 		break;
 	case SDM845_V1_SOC_ID:
+		corner_name = cprh_sdm845_v1_kbss_fuse_corner_name[
+						vreg->thread->ctrl->ctrl_id];
+		lowest_fuse_corner = 0;
+		highest_fuse_corner = vreg->fuse_corner_count - 1;
+		break;
 	case SDM845_V2_SOC_ID:
-		corner_name = cprh_sdm845_kbss_fuse_corner_name[
+		corner_name = cprh_sdm845_v2_kbss_fuse_corner_name[
 						vreg->thread->ctrl->ctrl_id];
 		lowest_fuse_corner = 0;
 		highest_fuse_corner = vreg->fuse_corner_count - 1;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a06069b..1d57b34 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -384,11 +384,12 @@
 		= container_of(kref, struct scsi_target, reap_ref);
 
 	/*
-	 * if we get here and the target is still in the CREATED state that
+	 * if we get here and the target is still in a CREATED state that
 	 * means it was allocated but never made visible (because a scan
 	 * turned up no LUNs), so don't call device_del() on it.
 	 */
-	if (starget->state != STARGET_CREATED) {
+	if ((starget->state != STARGET_CREATED) &&
+	    (starget->state != STARGET_CREATED_REMOVE)) {
 		transport_remove_device(&starget->dev);
 		device_del(&starget->dev);
 	}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e90a8e1..f14d95e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1371,11 +1371,15 @@
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_for_each_entry(starget, &shost->__targets, siblings) {
 		if (starget->state == STARGET_DEL ||
-		    starget->state == STARGET_REMOVE)
+		    starget->state == STARGET_REMOVE ||
+		    starget->state == STARGET_CREATED_REMOVE)
 			continue;
 		if (starget->dev.parent == dev || &starget->dev == dev) {
 			kref_get(&starget->reap_ref);
-			starget->state = STARGET_REMOVE;
+			if (starget->state == STARGET_CREATED)
+				starget->state = STARGET_CREATED_REMOVE;
+			else
+				starget->state = STARGET_REMOVE;
 			spin_unlock_irqrestore(shost->host_lock, flags);
 			__scsi_remove_target(starget);
 			scsi_target_reap(starget);
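
To summarise the intent of the two SCSI changes above: a target that was allocated but never made visible must not go through device_del(), so its removal is now tracked with the separate STARGET_CREATED_REMOVE state. The decision, reduced to a sketch with an illustrative local enum (not the scsi_device.h definition):

/* Illustrative local copy of the relevant states; not the scsi_device.h enum. */
enum starget_state {
	STARGET_CREATED,	/* allocated, never made visible */
	STARGET_REMOVE,		/* visible target being torn down */
	STARGET_CREATED_REMOVE,	/* never-visible target being torn down */
	STARGET_DEL,
};

/* Mark a target for removal, remembering whether it was ever made visible. */
static enum starget_state mark_for_removal(enum starget_state s)
{
	return s == STARGET_CREATED ? STARGET_CREATED_REMOVE : STARGET_REMOVE;
}

/* Only targets that actually became visible need device_del() in the reaper. */
static int needs_device_del(enum starget_state s)
{
	return s != STARGET_CREATED && s != STARGET_CREATED_REMOVE;
}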
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 370e092..a43c18f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1487,7 +1487,7 @@
 		hba->clk_gating.state = REQ_CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 			hba->clk_gating.state);
-		queue_work(hba->clk_gating.ungating_workq,
+		queue_work(hba->clk_gating.clk_gating_workq,
 				&hba->clk_gating.ungate_work);
 		/*
 		 * fall through to check if we should wait for this
@@ -1755,7 +1755,8 @@
 	struct ufs_hba *hba = container_of(timer, struct ufs_hba,
 					   clk_gating.gate_hrtimer);
 
-	schedule_work(&hba->clk_gating.gate_work);
+	queue_work(hba->clk_gating.clk_gating_workq,
+				&hba->clk_gating.gate_work);
 
 	return HRTIMER_NORESTART;
 }
@@ -1763,7 +1764,7 @@
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
 	struct ufs_clk_gating *gating = &hba->clk_gating;
-	char wq_name[sizeof("ufs_clk_ungating_00")];
+	char wq_name[sizeof("ufs_clk_gating_00")];
 
 	hba->clk_gating.state = CLKS_ON;
 
@@ -1792,9 +1793,10 @@
 	hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
 
-	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_ungating_%d",
+	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
 			hba->host->host_no);
-	hba->clk_gating.ungating_workq = create_singlethread_workqueue(wq_name);
+	hba->clk_gating.clk_gating_workq =
+		create_singlethread_workqueue(wq_name);
 
 	gating->is_enabled = true;
 
@@ -1858,7 +1860,7 @@
 	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 	ufshcd_cancel_gate_work(hba);
 	cancel_work_sync(&hba->clk_gating.ungate_work);
-	destroy_workqueue(hba->clk_gating.ungating_workq);
+	destroy_workqueue(hba->clk_gating.clk_gating_workq);
 }
 
 static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
@@ -7862,7 +7864,7 @@
 			 __func__);
 		pm_runtime_get_sync(hba->dev);
 		ufshcd_detect_device(hba);
-		pm_runtime_put_sync(hba->dev);
+		/* ufshcd_probe_hba() calls pm_runtime_put_sync() on exit */
 	} else {
 		dev_dbg(hba->dev, "%s: card removed notification received\n",
 			 __func__);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 343f327..eaed1b3 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -446,7 +446,7 @@
 	struct device_attribute enable_attr;
 	bool is_enabled;
 	int active_reqs;
-	struct workqueue_struct *ungating_workq;
+	struct workqueue_struct *clk_gating_workq;
 };
 
 /* Hibern8 state  */
diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c
index 06601dd..641f70e 100644
--- a/drivers/soc/qcom/early_random.c
+++ b/drivers/soc/qcom/early_random.c
@@ -38,6 +38,7 @@
 	int ret;
 	u32 resp;
 	struct scm_desc desc;
+	u64 bytes_received;
 
 	data.out_buf = (uint8_t *) virt_to_phys(random_buffer);
 	desc.args[0] = (unsigned long) data.out_buf;
@@ -46,18 +47,18 @@
 
 	dmac_flush_range(random_buffer, random_buffer + RANDOM_BUFFER_SIZE);
 
-	if (!is_scm_armv8())
+	if (!is_scm_armv8()) {
 		ret = scm_call_noalloc(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data,
 				sizeof(data), &resp, sizeof(resp),
 				common_scm_buf,
 				SCM_BUFFER_SIZE(common_scm_buf));
-	else
+		bytes_received = resp;
+	} else {
 		ret = scm_call2(SCM_SIP_FNID(TZ_SVC_CRYPTO, PRNG_CMD_ID),
 					&desc);
-
+		bytes_received = desc.ret[0];
+	}
 	if (!ret) {
-		u64 bytes_received = desc.ret[0];
-
 		if (bytes_received != SZ_512)
 			pr_warn("Did not receive the expected number of bytes from PRNG: %llu\n",
 				bytes_received);
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index d31bf8d..1d605e3 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -30,7 +30,6 @@
 #include "glink_private.h"
 #include "glink_xprt_if.h"
 
-#define GLINK_CTX_CANARY 0x58544324 /* "$CTX" */
 /* Number of internal IPC Logging log pages */
 #define NUM_LOG_PAGES	10
 #define GLINK_PM_QOS_HOLDOFF_MS		10
@@ -40,7 +39,6 @@
 
 #define GLINK_KTHREAD_PRIO 1
 
-static rwlock_t magic_lock;
 /**
  * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
  * @max_rate_kBps:	Maximum rate supported by the priority bucket.
@@ -232,6 +230,8 @@
  * @req_rate_kBps:			Current QoS request by the channel.
  * @tx_intent_cnt:			Intent count to transmit soon in future.
  * @tx_cnt:				Packets to be picked by tx scheduler.
+ * @rt_vote_on:				Number of times RT vote on is called.
+ * @rt_vote_off:			Number of times RT vote off is called.
  */
 struct channel_ctx {
 	struct rwref_lock ch_state_lhb2;
@@ -312,7 +312,9 @@
 	unsigned long req_rate_kBps;
 	uint32_t tx_intent_cnt;
 	uint32_t tx_cnt;
-	uint32_t magic_number;
+
+	uint32_t rt_vote_on;
+	uint32_t rt_vote_off;
 };
 
 static struct glink_core_if core_impl;
@@ -443,33 +445,15 @@
 
 static int glink_get_ch_ctx(struct channel_ctx *ctx)
 {
-	unsigned long flags;
-
 	if (!ctx)
 		return -EINVAL;
-	read_lock_irqsave(&magic_lock, flags);
-	if (ctx->magic_number != GLINK_CTX_CANARY) {
-		read_unlock_irqrestore(&magic_lock, flags);
-		return -EINVAL;
-	}
 	rwref_get(&ctx->ch_state_lhb2);
-	read_unlock_irqrestore(&magic_lock, flags);
 	return 0;
 }
 
-static int glink_put_ch_ctx(struct channel_ctx *ctx, bool update_magic)
+static void glink_put_ch_ctx(struct channel_ctx *ctx)
 {
-	unsigned long flags;
-
-	if (!update_magic) {
-		rwref_put(&ctx->ch_state_lhb2);
-		return 0;
-	}
-	write_lock_irqsave(&magic_lock, flags);
-	ctx->magic_number = 0;
 	rwref_put(&ctx->ch_state_lhb2);
-	write_unlock_irqrestore(&magic_lock, flags);
-	return 0;
 }
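
With the magic-number canary removed, channel-context lifetime is protected purely by the reference taken in glink_get_ch_ctx() and dropped in glink_put_ch_ctx(). Every exported glink_* entry point below follows the same guard; in sketch form:

/* Canonical guard used by the glink_* entry points below (simplified). */
static int guarded_channel_op(struct channel_ctx *ctx)
{
	int ret;

	ret = glink_get_ch_ctx(ctx);	/* take a reference, reject NULL ctx */
	if (ret)
		return ret;

	/* ... operate on ctx while the reference pins it ... */
	ret = 0;

	glink_put_ch_ctx(ctx);		/* drop the reference */
	return ret;
}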
 
 /**
@@ -1931,13 +1915,13 @@
 		}
 
 		ctx->transport_ptr = xprt_ctx;
+		rwref_get(&ctx->ch_state_lhb2);
 		list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
 
 		GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
 			"%s: local:GLINK_CHANNEL_CLOSED\n",
 			__func__);
 	}
-	rwref_get(&ctx->ch_state_lhb2);
 	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
 	rwref_write_put(&xprt_ctx->xprt_state_lhb0);
 	mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
@@ -2419,6 +2403,25 @@
 }
 
 /**
+ * dummy_rx_rt_vote() - Dummy RX Realtime thread vote
+ * @if_ptr:	The transport on which the RT vote is requested.
+ */
+static int dummy_rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_rx_rt_unvote() - Dummy RX Realtime thread unvote
+ * @if_ptr:	The transport on which the RT vote is removed.
+ */
+static int dummy_rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
  * notif_if_up_all_xprts() - Check and notify existing transport state if up
  * @notif_info:	Data structure containing transport information to be notified.
  *
@@ -2600,7 +2603,6 @@
 	ctx->notify_tx_abort = cfg->notify_tx_abort;
 	ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
 	ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
-	ctx->magic_number = GLINK_CTX_CANARY;
 
 	if (!ctx->notify_rx_intent_req)
 		ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
@@ -2742,13 +2744,13 @@
 
 	GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
 	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return 0;
 	}
 
 	if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
 		/* close already pending */
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EBUSY;
 	}
 
@@ -2813,7 +2815,7 @@
 
 	rwref_put(&ctx->ch_state_lhb2);
 	rwref_read_put(&xprt_ctx->xprt_state_lhb0);
-	glink_put_ch_ctx(ctx, true);
+	glink_put_ch_ctx(ctx);
 	return ret;
 }
 EXPORT_SYMBOL(glink_close);
@@ -3029,13 +3031,13 @@
 		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
 
 	rwref_read_put(&ctx->ch_state_lhb2);
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ret;
 
 glink_tx_common_err:
 	rwref_read_put(&ctx->ch_state_lhb2);
 glink_tx_common_err_2:
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	kfree(tx_info);
 	return ret;
 }
@@ -3085,7 +3087,7 @@
 		/* Can only queue rx intents if channel is fully opened */
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EBUSY;
 	}
 
@@ -3094,14 +3096,14 @@
 		GLINK_ERR_CH(ctx,
 			"%s: Intent pointer allocation failed size[%zu]\n",
 			__func__, size);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -ENOMEM;
 	}
 	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
 			intent_ptr->intent_size);
 
 	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return ret;
 	}
 
@@ -3111,7 +3113,7 @@
 	if (ret)
 		/* unable to transmit, dequeue intent */
 		ch_remove_local_rx_intent(ctx, intent_ptr->id);
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ret;
 }
 EXPORT_SYMBOL(glink_queue_rx_intent);
@@ -3143,12 +3145,12 @@
 		if (size <= intent->intent_size) {
 			spin_unlock_irqrestore(
 				&ctx->local_rx_intent_lst_lock_lhc1, flags);
-			glink_put_ch_ctx(ctx, false);
+			glink_put_ch_ctx(ctx);
 			return true;
 		}
 	}
 	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return false;
 }
 EXPORT_SYMBOL(glink_rx_intent_exists);
@@ -3177,7 +3179,7 @@
 	if (IS_ERR_OR_NULL(liid_ptr)) {
 		/* invalid pointer */
 		GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EINVAL;
 	}
 
@@ -3203,7 +3205,7 @@
 	/* send rx done */
 	ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
 			ctx->lcid, id, reuse);
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ret;
 }
 EXPORT_SYMBOL(glink_rx_done);
@@ -3257,7 +3259,7 @@
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EBUSY;
 	}
 
@@ -3267,7 +3269,7 @@
 			ctx->lcid, ctx->lsigs);
 	GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
 
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ret;
 }
 EXPORT_SYMBOL(glink_sigs_set);
@@ -3293,12 +3295,12 @@
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EBUSY;
 	}
 
 	*sigs = ctx->lsigs;
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return 0;
 }
 EXPORT_SYMBOL(glink_sigs_local_get);
@@ -3325,12 +3327,12 @@
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EBUSY;
 	}
 
 	*sigs = ctx->rsigs;
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return 0;
 }
 EXPORT_SYMBOL(glink_sigs_remote_get);
@@ -3434,7 +3436,7 @@
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EBUSY;
 	}
 
@@ -3444,7 +3446,7 @@
 	if (ret < 0)
 		GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
 			     __func__, latency_us, pkt_size);
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_latency);
@@ -3468,12 +3470,12 @@
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EBUSY;
 	}
 
 	ret = glink_qos_reset_priority(ctx);
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_cancel);
@@ -3500,7 +3502,7 @@
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EBUSY;
 	}
 
@@ -3509,7 +3511,7 @@
 	ret = glink_qos_add_ch_tx_intent(ctx);
 	spin_unlock(&ctx->tx_lists_lock_lhc3);
 	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_start);
@@ -3537,11 +3539,11 @@
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return (unsigned long)-EBUSY;
 	}
 
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ctx->transport_ptr->ops->get_power_vote_ramp_time(
 			ctx->transport_ptr->ops,
 			glink_prio_to_power_state(ctx->transport_ptr,
@@ -3549,6 +3551,61 @@
 }
 EXPORT_SYMBOL(glink_qos_get_ramp_time);
 
+
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transaction are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+	ret = ctx->transport_ptr->ops->rx_rt_vote(ctx->transport_ptr->ops);
+	ctx->rt_vote_on++;
+	GLINK_INFO_CH(ctx, "%s: Voting RX Realtime Thread %d", __func__, ret);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+
+/**
+ * glink_end_rx_rt() - Remove the vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		glink_put_ch_ctx(ctx);
+		return -EBUSY;
+	}
+	ret = ctx->transport_ptr->ops->rx_rt_unvote(ctx->transport_ptr->ops);
+	ctx->rt_vote_off++;
+	GLINK_INFO_CH(ctx, "%s: Unvoting RX Realtime Thread %d", __func__, ret);
+	glink_put_ch_ctx(ctx);
+	return ret;
+}
+
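+
A client that needs low-latency receive handling would bracket its latency-sensitive window with these two calls. A hedged usage sketch (the caller name and error handling are simplified, not part of the G-Link client API):

/* Hypothetical caller: raise RX thread priority only while latency matters. */
static int process_burst(void *glink_handle)
{
	int ret;

	ret = glink_start_rx_rt(glink_handle);
	if (ret)
		return ret;

	/* ... latency-sensitive receive/processing window ... */

	return glink_end_rx_rt(glink_handle);
}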
 /**
  * glink_rpm_rx_poll() - Poll and receive any available events
  * @handle:	Channel handle in which this operation is performed.
@@ -3631,10 +3688,10 @@
 	if (ret)
 		return ret;
 	if (!ctx->transport_ptr) {
-		glink_put_ch_ctx(ctx, false);
+		glink_put_ch_ctx(ctx);
 		return -EOPNOTSUPP;
 	}
-	glink_put_ch_ctx(ctx, false);
+	glink_put_ch_ctx(ctx);
 	return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
 }
 EXPORT_SYMBOL(glink_wait_link_down);
@@ -3956,6 +4013,10 @@
 		if_ptr->power_vote = dummy_power_vote;
 	if (!if_ptr->power_unvote)
 		if_ptr->power_unvote = dummy_power_unvote;
+	if (!if_ptr->rx_rt_vote)
+		if_ptr->rx_rt_vote = dummy_rx_rt_vote;
+	if (!if_ptr->rx_rt_unvote)
+		if_ptr->rx_rt_unvote = dummy_rx_rt_unvote;
 	xprt_ptr->capabilities = 0;
 	xprt_ptr->ops = if_ptr;
 	spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
@@ -6175,7 +6236,6 @@
 static int glink_init(void)
 {
 	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
-	rwlock_init(&magic_lock);
 	if (!log_ctx)
 		GLINK_ERR("%s: unable to create log context\n", __func__);
 	glink_debugfs_init();
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 94dffa5..384347d 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -182,6 +182,8 @@
  * @deferred_cmds:		List of deferred commands that need to be
  *				processed in process context.
  * @deferred_cmds_cnt:		Number of deferred commands in queue.
+ * @rt_vote_lock:		Serialize access to RT rx votes
+ * @rt_votes:			Vote count for RT rx thread priority
  * @num_pw_states:		Size of @ramp_time_us.
  * @ramp_time_us:		Array of ramp times in microseconds where array
  *				index position represents a power state.
@@ -221,6 +223,8 @@
 	spinlock_t rx_lock;
 	struct list_head deferred_cmds;
 	uint32_t deferred_cmds_cnt;
+	spinlock_t rt_vote_lock;
+	uint32_t rt_votes;
 	uint32_t num_pw_states;
 	unsigned long *ramp_time_us;
 	struct mailbox_config_info *mailbox;
@@ -2125,6 +2129,52 @@
 }
 
 /**
+ * rx_rt_vote() - Increment the RX thread RT vote
+ * @if_ptr:	The transport interface on which the RT vote is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_vote(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+	struct sched_param param = { .sched_priority = 1 };
+	int ret = 0;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+	if (!einfo->rt_votes)
+		ret = sched_setscheduler_nocheck(einfo->task, SCHED_FIFO,
+							&param);
+	einfo->rt_votes++;
+	spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+	return ret;
+}
+
+/**
+ * rx_rt_unvote() - Remove an RX thread RT vote
+ * @if_ptr:	The transport interface on which RT voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int rx_rt_unvote(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+	struct sched_param param = { .sched_priority = 0 };
+	int ret = 0;
+	unsigned long flags;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->rt_vote_lock, flags);
+	einfo->rt_votes--;
+	if (!einfo->rt_votes)
+		ret = sched_setscheduler_nocheck(einfo->task, SCHED_NORMAL,
+							&param);
+	spin_unlock_irqrestore(&einfo->rt_vote_lock, flags);
+	return ret;
+}
+
+/**
  * negotiate_features_v1() - determine what features of a version can be used
  * @if_ptr:	The transport for which features are negotiated for.
  * @version:	The version negotiated.
@@ -2169,6 +2219,8 @@
 	einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
 	einfo->xprt_if.power_vote = power_vote;
 	einfo->xprt_if.power_unvote = power_unvote;
+	einfo->xprt_if.rx_rt_vote = rx_rt_vote;
+	einfo->xprt_if.rx_rt_unvote = rx_rt_unvote;
 }
 
 /**
@@ -2341,6 +2393,8 @@
 	init_srcu_struct(&einfo->use_ref);
 	spin_lock_init(&einfo->rx_lock);
 	INIT_LIST_HEAD(&einfo->deferred_cmds);
+	spin_lock_init(&einfo->rt_vote_lock);
+	einfo->rt_votes = 0;
 
 	mutex_lock(&probe_lock);
 	if (edge_infos[einfo->remote_proc_id]) {
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
index f4d5a3b..47c1580 100644
--- a/drivers/soc/qcom/glink_xprt_if.h
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,8 @@
 			struct glink_transport_if *if_ptr, uint32_t state);
 	int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state);
 	int (*power_unvote)(struct glink_transport_if *if_ptr);
+	int (*rx_rt_vote)(struct glink_transport_if *if_ptr);
+	int (*rx_rt_unvote)(struct glink_transport_if *if_ptr);
 	/*
 	 * Keep data pointers at the end of the structure after all function
 	 * pointer to allow for in-place initialization.
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index a0982bc..fa8191e 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -84,34 +84,58 @@
 	} while (0)
 
 #define icnss_pr_err(_fmt, ...) do {					\
-		pr_err(_fmt, ##__VA_ARGS__);				\
-		icnss_ipc_log_string("ERR: " pr_fmt(_fmt),		\
-				     ##__VA_ARGS__);			\
+	printk("%s" pr_fmt(_fmt), KERN_ERR, ##__VA_ARGS__);		\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+			     ##__VA_ARGS__);				\
 	} while (0)
 
 #define icnss_pr_warn(_fmt, ...) do {					\
-		pr_warn(_fmt, ##__VA_ARGS__);				\
-		icnss_ipc_log_string("WRN: " pr_fmt(_fmt),		\
-				     ##__VA_ARGS__);			\
+	printk("%s" pr_fmt(_fmt), KERN_WARNING, ##__VA_ARGS__);		\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+			     ##__VA_ARGS__);				\
 	} while (0)
 
 #define icnss_pr_info(_fmt, ...) do {					\
-		pr_info(_fmt, ##__VA_ARGS__);				\
-		icnss_ipc_log_string("INF: " pr_fmt(_fmt),		\
-				     ##__VA_ARGS__);			\
+	printk("%s" pr_fmt(_fmt), KERN_INFO, ##__VA_ARGS__);		\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+			     ##__VA_ARGS__);				\
 	} while (0)
 
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define icnss_pr_dbg(_fmt, ...) do {					\
-		pr_debug(_fmt, ##__VA_ARGS__);				\
-		icnss_ipc_log_string("DBG: " pr_fmt(_fmt),		\
-				     ##__VA_ARGS__);			\
+	pr_debug(_fmt, ##__VA_ARGS__);					\
+	icnss_ipc_log_string(pr_fmt(_fmt), ##__VA_ARGS__);		\
 	} while (0)
 
 #define icnss_pr_vdbg(_fmt, ...) do {					\
-		pr_debug(_fmt, ##__VA_ARGS__);				\
-		icnss_ipc_log_long_string("DBG: " pr_fmt(_fmt),		\
-				     ##__VA_ARGS__);			\
+	pr_debug(_fmt, ##__VA_ARGS__);					\
+	icnss_ipc_log_long_string(pr_fmt(_fmt), ##__VA_ARGS__);		\
 	} while (0)
+#elif defined(DEBUG)
+#define icnss_pr_dbg(_fmt, ...) do {					\
+	printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__);		\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+			     ##__VA_ARGS__);				\
+	} while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do {					\
+	printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__);		\
+	icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "",		\
+				  ##__VA_ARGS__);			\
+	} while (0)
+#else
+#define icnss_pr_dbg(_fmt, ...) do {					\
+	no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__);	\
+	icnss_ipc_log_string("%s" pr_fmt(_fmt), "",			\
+		     ##__VA_ARGS__);					\
+	} while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do {					\
+	no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__);	\
+	icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "",		\
+				  ##__VA_ARGS__);			\
+	} while (0)
+#endif
 
 #ifdef CONFIG_ICNSS_DEBUG
 #define ICNSS_ASSERT(_condition) do {					\
@@ -2579,21 +2603,22 @@
 	if (event_data == NULL)
 		return notifier_from_errno(-ENOMEM);
 
+	event_data->crashed = true;
+
 	if (state == NULL) {
-		event_data->crashed = true;
 		priv->stats.recovery.root_pd_crash++;
 		goto event_post;
 	}
 
 	switch (*state) {
 	case ROOT_PD_WDOG_BITE:
-		event_data->crashed = true;
 		event_data->wdog_bite = true;
 		priv->stats.recovery.root_pd_crash++;
 		break;
 	case ROOT_PD_SHUTDOWN:
 		cause = ICNSS_ROOT_PD_SHUTDOWN;
 		priv->stats.recovery.root_pd_shutdown++;
+		event_data->crashed = false;
 		break;
 	case USER_PD_STATE_CHANGE:
 		if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
@@ -2605,7 +2630,6 @@
 		}
 		break;
 	default:
-		event_data->crashed = true;
 		priv->stats.recovery.root_pd_crash++;
 		break;
 	}
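
A minimal sketch of the debug-macro fallback pattern adopted above (names are illustrative, not from the patch; the icnss macros additionally pass the KERN_* level through a "%s" argument so the same format string can feed the IPC log, which this sketch omits): with CONFIG_DYNAMIC_DEBUG the message goes through pr_debug() so it can be toggled at runtime, with DEBUG it is always printed, and otherwise no_printk() keeps format-string type checking without emitting anything.

#include <linux/printk.h>

#if defined(CONFIG_DYNAMIC_DEBUG)
#define demo_dbg(fmt, ...)	pr_debug(fmt, ##__VA_ARGS__)
#elif defined(DEBUG)
#define demo_dbg(fmt, ...)	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define demo_dbg(fmt, ...)	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
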
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
index 3f8b52c..6c69bec 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -133,7 +133,7 @@
 		return 0;
 	}
 
-	if (sscanf(buf, "%s %llu", name, &vote_khz) != 2) {
+	if (sscanf(buf, "%9s %llu", name, &vote_khz) != 2) {
 		pr_err("%s:return error", __func__);
 		return -EINVAL;
 	}
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 17e0a4c..1b41269 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -77,6 +77,8 @@
 	DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
 	bool dirty;
 	bool in_solver_mode;
+	/* Cache sleep and wake requests sent as passthru */
+	struct rpmh_msg *passthru_cache[2 * RPMH_MAX_REQ_IN_BATCH];
 };
 
 struct rpmh_client {
@@ -111,17 +113,24 @@
 	return msg;
 }
 
+static void __free_msg_to_pool(struct rpmh_msg *rpm_msg)
+{
+	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
+
+	/* If we allocated the pool, set it as available */
+	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
+		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
+	}
+}
+
 static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
 {
 	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
 	unsigned long flags;
 
-	/* If we allocated the pool, set it as available */
-	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
-		spin_lock_irqsave(&rpm->lock, flags);
-		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
-		spin_unlock_irqrestore(&rpm->lock, flags);
-	}
+	spin_lock_irqsave(&rpm->lock, flags);
+	__free_msg_to_pool(rpm_msg);
+	spin_unlock_irqrestore(&rpm->lock, flags);
 }
 
 static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
@@ -181,6 +190,7 @@
 {
 	int ret;
 	int count = 4;
+	int skip = 0;
 
 	do {
 		ret = wait_for_completion_timeout(compl, RPMH_TIMEOUT);
@@ -192,6 +202,10 @@
 			return;
 		}
 		if (!count) {
+			if (skip++ % 100)
+				continue;
+			dev_err(rc->dev,
+				"RPMH waiting for interrupt from AOSS\n");
 			mbox_chan_debug(rc->chan);
 		} else {
 			dev_err(rc->dev,
@@ -511,6 +525,70 @@
 }
 EXPORT_SYMBOL(rpmh_write);
 
+static int cache_passthru(struct rpmh_client *rc, struct rpmh_msg **rpm_msg,
+					int count)
+{
+	struct rpmh_mbox *rpm = rc->rpmh;
+	unsigned long flags;
+	int ret = 0;
+	int index = 0;
+	int i;
+
+	spin_lock_irqsave(&rpm->lock, flags);
+	while (rpm->passthru_cache[index])
+		index++;
+	if (index + count >= 2 * RPMH_MAX_REQ_IN_BATCH) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < count; i++)
+		rpm->passthru_cache[index + i] = rpm_msg[i];
+fail:
+	spin_unlock_irqrestore(&rpm->lock, flags);
+
+	return ret;
+}
+
+static int flush_passthru(struct rpmh_client *rc)
+{
+	struct rpmh_mbox *rpm = rc->rpmh;
+	struct rpmh_msg *rpm_msg;
+	unsigned long flags;
+	int ret = 0;
+	int i;
+
+	/* Send Sleep/Wake requests to the controller, expect no response */
+	spin_lock_irqsave(&rpm->lock, flags);
+	for (i = 0; rpm->passthru_cache[i]; i++) {
+		rpm_msg = rpm->passthru_cache[i];
+		ret = mbox_send_controller_data(rc->chan, &rpm_msg->msg);
+		if (ret)
+			goto fail;
+	}
+fail:
+	spin_unlock_irqrestore(&rpm->lock, flags);
+
+	return ret;
+}
+
+static void invalidate_passthru(struct rpmh_client *rc)
+{
+	struct rpmh_mbox *rpm = rc->rpmh;
+	unsigned long flags;
+	int index = 0;
+	int i;
+
+	spin_lock_irqsave(&rpm->lock, flags);
+	while (rpm->passthru_cache[index])
+		index++;
+	for (i = 0; i < index; i++) {
+		__free_msg_to_pool(rpm->passthru_cache[i]);
+		rpm->passthru_cache[i] = NULL;
+	}
+	spin_unlock_irqrestore(&rpm->lock, flags);
+}
+
 /**
  * rpmh_write_passthru: Write multiple batches of RPMH commands without caching
  *
@@ -530,7 +608,7 @@
 int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
 			struct tcs_cmd *cmd, int *n)
 {
-	struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
+	struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH] = { NULL };
 	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
 	int count = 0;
@@ -548,7 +626,7 @@
 	if (ret)
 		return ret;
 
-	while (n[count++])
+	while (n[count++] > 0)
 		;
 	count--;
 	if (!count || count > RPMH_MAX_REQ_IN_BATCH)
@@ -607,14 +685,11 @@
 			rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
 		wait_for_tx_done(rc, &compl, addr, data);
 	} else {
-		/* Send Sleep requests to the controller, expect no response */
-		for (i = 0; i < count; i++) {
-			rpm_msg[i]->completion = NULL;
-			ret = mbox_send_controller_data(rc->chan,
-						&rpm_msg[i]->msg);
-			free_msg_to_pool(rpm_msg[i]);
-		}
-		return 0;
+		/*
+		 * Cache the sleep/wake data in the passthru store.
+		 * It is flushed before all other cached data.
+		 */
+		return cache_passthru(rc, rpm_msg, count);
 	}
 
 	return 0;
@@ -674,7 +749,7 @@
 {
 	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
 
-	if (IS_ERR_OR_NULL(rc) ||  n > MAX_RPMH_PAYLOAD)
+	if (IS_ERR_OR_NULL(rc) || n <= 0 || n > MAX_RPMH_PAYLOAD)
 		return -EINVAL;
 
 	if (rpmh_standalone)
@@ -710,6 +785,8 @@
 	if (rpmh_standalone)
 		return 0;
 
+	invalidate_passthru(rc);
+
 	rpm = rc->rpmh;
 	rpm_msg.msg.invalidate = true;
 	rpm_msg.msg.is_complete = false;
@@ -823,6 +900,11 @@
 	}
 	spin_unlock_irqrestore(&rpm->lock, flags);
 
+	/* Flush the cached passthru requests first */
+	ret = flush_passthru(rc);
+	if (ret)
+		return ret;
+
 	/*
 	 * Nobody else should be calling this function other than sleep,
 	 * hence we can run without locks.
@@ -906,8 +988,10 @@
 
 	rpmh->msg_pool = kzalloc(sizeof(struct rpmh_msg) *
 				RPMH_MAX_FAST_RES, GFP_KERNEL);
-	if (!rpmh->msg_pool)
+	if (!rpmh->msg_pool) {
+		of_node_put(spec.np);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	rpmh->mbox_dn = spec.np;
 	INIT_LIST_HEAD(&rpmh->resources);
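
A caller-side sketch of the new passthru cache (hedged: the tcs_cmd contents, the RPMH_SLEEP_STATE constant and the rpmh_flush() prototype are assumed to match this tree; nothing here is verbatim from the patch). Sleep/wake batches written through rpmh_write_passthru() are now only cached; they reach the controller when rpmh_flush() runs from the sleep path, and rpmh_invalidate() drops them again.

static int demo_cache_sleep_votes(struct rpmh_client *rc)
{
	struct tcs_cmd cmd[2] = { };	/* fill in resource address/data pairs */
	int n[] = { 2, 0 };		/* one batch of two commands, 0-terminated */
	int ret;

	/* cached in rpm->passthru_cache[]; nothing is sent yet */
	ret = rpmh_write_passthru(rc, RPMH_SLEEP_STATE, cmd, n);
	if (ret)
		return ret;

	/* later, from the sleep entry path: cached passthru data goes out first */
	return rpmh_flush(rc);
}
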
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index fcb3731..68ddd1f 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -668,10 +668,6 @@
 
 		desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
 
-		pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
-			x0, desc->arginfo, desc->args[0], desc->args[1],
-			desc->args[2], desc->x5);
-
 		trace_scm_call_start(x0, desc);
 
 		if (scm_version == SCM_ARMV8_64)
@@ -701,10 +697,8 @@
 	}  while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
 
 	if (ret < 0)
-		pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
-			x0, desc->arginfo, desc->args[0], desc->args[1],
-			desc->args[2], desc->x5, ret, desc->ret[0],
-			desc->ret[1], desc->ret[2]);
+		pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+			x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
 
 	if (arglen > N_REGISTER_ARGS)
 		kfree(desc->extra_arg_buf);
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 49fd7fe..6553ac0 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -25,29 +25,12 @@
 
 DEFINE_MUTEX(secure_buffer_mutex);
 
-struct cp2_mem_chunks {
-	u32 chunk_list;
-	u32 chunk_list_size;
-	u32 chunk_size;
-} __attribute__ ((__packed__));
-
-struct cp2_lock_req {
-	struct cp2_mem_chunks chunks;
-	u32 mem_usage;
-	u32 lock;
-} __attribute__ ((__packed__));
-
-
 struct mem_prot_info {
 	phys_addr_t addr;
 	u64 size;
 };
 
 #define MEM_PROT_ASSIGN_ID		0x16
-#define MEM_PROTECT_LOCK_ID2		0x0A
-#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
-#define V2_CHUNK_SIZE		SZ_1M
-#define FEATURE_ID_CP 12
 
 struct dest_vm_and_perm_info {
 	u32 vm;
@@ -59,137 +42,6 @@
 static void *qcom_secure_mem;
 #define QCOM_SECURE_MEM_SIZE (512*1024)
 
-static int secure_buffer_change_chunk(u32 chunks,
-				u32 nchunks,
-				u32 chunk_size,
-				int lock)
-{
-	struct cp2_lock_req request;
-	u32 resp;
-	int ret;
-	struct scm_desc desc = {0};
-
-	desc.args[0] = request.chunks.chunk_list = chunks;
-	desc.args[1] = request.chunks.chunk_list_size = nchunks;
-	desc.args[2] = request.chunks.chunk_size = chunk_size;
-	/* Usage is now always 0 */
-	desc.args[3] = request.mem_usage = 0;
-	desc.args[4] = request.lock = lock;
-	desc.args[5] = 0;
-	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
-				SCM_VAL);
-
-	kmap_flush_unused();
-	kmap_atomic_flush_unused();
-
-	if (!is_scm_armv8()) {
-		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
-				&request, sizeof(request), &resp, sizeof(resp));
-	} else {
-		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
-				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
-		resp = desc.ret[0];
-	}
-
-	return ret;
-}
-
-
-
-static int secure_buffer_change_table(struct sg_table *table, int lock)
-{
-	int i, j;
-	int ret = -EINVAL;
-	u32 *chunk_list;
-	struct scatterlist *sg;
-
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		int nchunks;
-		int size = sg->length;
-		int chunk_list_len;
-		phys_addr_t chunk_list_phys;
-
-		/*
-		 * This should theoretically be a phys_addr_t but the protocol
-		 * indicates this should be a u32.
-		 */
-		u32 base;
-		u64 tmp = sg_dma_address(sg);
-
-		WARN((tmp >> 32) & 0xffffffff,
-			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
-			__func__, sg, tmp);
-		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
-			WARN(1,
-				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
-				__func__, i, size, V2_CHUNK_SIZE);
-			return -EINVAL;
-		}
-
-		base = (u32)tmp;
-
-		nchunks = size / V2_CHUNK_SIZE;
-		chunk_list_len = sizeof(u32)*nchunks;
-
-		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
-
-		if (!chunk_list)
-			return -ENOMEM;
-
-		chunk_list_phys = virt_to_phys(chunk_list);
-		for (j = 0; j < nchunks; j++)
-			chunk_list[j] = base + j * V2_CHUNK_SIZE;
-
-		/*
-		 * Flush the chunk list before sending the memory to the
-		 * secure environment to ensure the data is actually present
-		 * in RAM
-		 */
-		dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
-
-		ret = secure_buffer_change_chunk(virt_to_phys(chunk_list),
-				nchunks, V2_CHUNK_SIZE, lock);
-
-		if (!ret) {
-			/*
-			 * Set or clear the private page flag to communicate the
-			 * status of the chunk to other entities
-			 */
-			if (lock)
-				SetPagePrivate(sg_page(sg));
-			else
-				ClearPagePrivate(sg_page(sg));
-		}
-
-		kfree(chunk_list);
-	}
-
-	return ret;
-}
-
-int msm_secure_table(struct sg_table *table)
-{
-	int ret;
-
-	mutex_lock(&secure_buffer_mutex);
-	ret = secure_buffer_change_table(table, 1);
-	mutex_unlock(&secure_buffer_mutex);
-
-	return ret;
-
-}
-
-int msm_unsecure_table(struct sg_table *table)
-{
-	int ret;
-
-	mutex_lock(&secure_buffer_mutex);
-	ret = secure_buffer_change_table(table, 0);
-	mutex_unlock(&secure_buffer_mutex);
-	return ret;
-
-}
-
 static struct dest_vm_and_perm_info *
 populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
 		   size_t *size_in_bytes)
@@ -426,20 +278,6 @@
 	}
 }
 
-#define MAKE_CP_VERSION(major, minor, patch) \
-	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
-
-bool msm_secure_v2_is_supported(void)
-{
-	int version = scm_get_feat_version(FEATURE_ID_CP);
-
-	/*
-	 * if the version is < 1.1.0 then dynamic buffer allocation is
-	 * not supported
-	 */
-	return version >= MAKE_CP_VERSION(1, 1, 0);
-}
-
 static int __init alloc_secure_shared_memory(void)
 {
 	int ret = 0;
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
index 5b0129e..9c764aa 100644
--- a/drivers/soc/qcom/smp2p_sleepstate.c
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -14,6 +14,8 @@
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/ipc_router.h>
 #include "smp2p_private.h"
 
 #define SET_DELAY (2 * HZ)
@@ -35,10 +37,14 @@
 	switch (event) {
 	case PM_SUSPEND_PREPARE:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
+		msleep(25); /* To be tuned based on SMP2P latencies */
+		msm_ipc_router_set_ws_allowed(true);
 		break;
 
 	case PM_POST_SUSPEND:
 		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
+		msleep(25); /* To be tuned based on SMP2P latencies */
+		msm_ipc_router_set_ws_allowed(false);
 		break;
 	}
 	return NOTIFY_DONE;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 31760ee..c69429c 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -592,10 +592,11 @@
 }
 EXPORT_SYMBOL_GPL(socinfo_get_id);
 
-static char *socinfo_get_id_string(void)
+char *socinfo_get_id_string(void)
 {
 	return (socinfo) ? cpu_of_id[socinfo->v0_1.id].soc_id_string : NULL;
 }
+EXPORT_SYMBOL(socinfo_get_id_string);
 
 uint32_t socinfo_get_version(void)
 {
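
With socinfo_get_id_string() now exported, a module outside socinfo can log the SoC name directly. A one-line usage sketch (hypothetical caller; the declaration is assumed to live in the soc/qcom socinfo header):

static void demo_log_soc(void)
{
	char *soc_name = socinfo_get_id_string();	/* NULL before socinfo init */

	pr_info("running on %s\n", soc_name ? soc_name : "unknown SoC");
}
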
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index a8107cc..df8900a 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -365,11 +365,23 @@
 	return 0;
 }
 
+static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	int ret;
+
+	ret = of_device_uevent_modalias(dev, env);
+	if (ret != -ENODEV)
+		return ret;
+
+	return 0;
+}
+
 static struct bus_type spmi_bus_type = {
 	.name		= "spmi",
 	.match		= spmi_device_match,
 	.probe		= spmi_drv_probe,
 	.remove		= spmi_drv_remove,
+	.uevent		= spmi_drv_uevent,
 };
 
 /**
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index ff6436f..ee8e8b6 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -249,6 +249,8 @@
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
 	source_nelems = count_set_bits(buffer->flags & ION_FLAGS_CP_MASK);
+	if (!source_nelems)
+		return;
 	source_vm_list = kcalloc(source_nelems, sizeof(*source_vm_list),
 				 GFP_KERNEL);
 	if (!source_vm_list)
@@ -291,6 +293,10 @@
 	source_vm = VMID_HLOS;
 
 	dest_nelems = count_set_bits(flags & ION_FLAGS_CP_MASK);
+	if (!dest_nelems) {
+		ret = -EINVAL;
+		goto out;
+	}
 	dest_vm_list = kcalloc(dest_nelems, sizeof(*dest_vm_list), GFP_KERNEL);
 	if (!dest_vm_list) {
 		ret = -ENOMEM;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index c0cda28..a9731d6 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -525,7 +525,7 @@
 			task_set_lmk_waiting(selected);
 		task_unlock(selected);
 		trace_lowmemory_kill(selected, cache_size, cache_limit, free);
-		lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
+		lowmem_print(1, "Killing '%s' (%d) (tgid %d), adj %hd,\n"
 			"to free %ldkB on behalf of '%s' (%d) because\n"
 			"cache %ldkB is below limit %ldkB for oom score %hd\n"
 			"Free memory is %ldkB above reserved.\n"
@@ -534,7 +534,7 @@
 			"Total free pages is %ldkB\n"
 			"Total file cache is %ldkB\n"
 			"GFP mask is 0x%x\n",
-			selected->comm, selected->pid,
+			selected->comm, selected->pid, selected->tgid,
 			selected_oom_score_adj,
 			selected_tasksize * (long)(PAGE_SIZE / 1024),
 			current->comm, current->pid,
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 1c967c3..a574885 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3078,8 +3078,7 @@
 		/* following line: 2-1 per STC */
 		ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
 		ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
-		/* following line: N-1 per STC */
-		ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+		ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
 	} else { /* TRIG_EXT */
 		/* FIXME:  assert scan_begin_arg != 0, ret failure otherwise */
 		devpriv->ao_cmd2  |= NISTC_AO_CMD2_BC_GATE_ENA;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index b27de88..995f2da 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1650,8 +1650,13 @@
 	ibmsg = tx->tx_msg;
 	ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
 
-	copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE,
-		       &from);
+	rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
+			    &from);
+	if (rc != payload_nob) {
+		kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+		return -EFAULT;
+	}
+
 	nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
 	kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
 
@@ -1751,8 +1756,14 @@
 			break;
 		}
 
-		copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload,
-			     IBLND_MSG_SIZE, to);
+		rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
+				  to);
+		if (rc != rlen) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = 0;
 		lnet_finalize(ni, lntmsg, 0);
 		break;
 
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 68e1e6b..b432153 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -43,6 +43,7 @@
 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
 	{}	/* Terminating entry */
 };
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 7d90e25..86ace14 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1049,6 +1049,26 @@
 	return err;
 }
 
+static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	if (!ap)
+		return -ENOMEM;
+
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags &
+					IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
+	kfree(ap);
+	return 0;
+}
+
 static int lynxfb_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
@@ -1057,6 +1077,10 @@
 	int fbidx;
 	int err;
 
+	err = lynxfb_kick_out_firmware_fb(pdev);
+	if (err)
+		return err;
+
 	/* enable device */
 	err = pcim_enable_device(pdev);
 	if (err)
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 7e70fe8..9cbbc9c 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -802,6 +802,7 @@
 DEF_TPG_ATTRIB(t10_pi);
 DEF_TPG_ATTRIB(fabric_prot_type);
 DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+DEF_TPG_ATTRIB(login_keys_workaround);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_attr_authentication,
@@ -817,6 +818,7 @@
 	&iscsi_tpg_attrib_attr_t10_pi,
 	&iscsi_tpg_attrib_attr_fabric_prot_type,
 	&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
+	&iscsi_tpg_attrib_attr_login_keys_workaround,
 	NULL,
 };
 
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 89d34bd..6693d7c 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -819,7 +819,8 @@
 			SENDER_TARGET,
 			login->rsp_buf,
 			&login->rsp_length,
-			conn->param_list);
+			conn->param_list,
+			conn->tpg->tpg_attrib.login_keys_workaround);
 	if (ret < 0)
 		return -1;
 
@@ -889,7 +890,8 @@
 			SENDER_TARGET,
 			login->rsp_buf,
 			&login->rsp_length,
-			conn->param_list);
+			conn->param_list,
+			conn->tpg->tpg_attrib.login_keys_workaround);
 	if (ret < 0) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
 				ISCSI_LOGIN_STATUS_INIT_ERR);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 4a073339a..0151776 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -765,7 +765,8 @@
 	return 0;
 }
 
-static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
+						    bool keys_workaround)
 {
 	if (IS_TYPE_BOOL_AND(param)) {
 		if (!strcmp(param->value, NO))
@@ -773,19 +774,31 @@
 	} else if (IS_TYPE_BOOL_OR(param)) {
 		if (!strcmp(param->value, YES))
 			SET_PSTATE_REPLY_OPTIONAL(param);
-		 /*
-		  * Required for gPXE iSCSI boot client
-		  */
-		if (!strcmp(param->name, IMMEDIATEDATA))
-			SET_PSTATE_REPLY_OPTIONAL(param);
+
+		if (keys_workaround) {
+			/*
+			 * Required for gPXE iSCSI boot client
+			 */
+			if (!strcmp(param->name, IMMEDIATEDATA))
+				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
 	} else if (IS_TYPE_NUMBER(param)) {
 		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
 			SET_PSTATE_REPLY_OPTIONAL(param);
-		/*
-		 * Required for gPXE iSCSI boot client
-		 */
-		if (!strcmp(param->name, MAXCONNECTIONS))
-			SET_PSTATE_REPLY_OPTIONAL(param);
+
+		if (keys_workaround) {
+			/*
+			 * Required for Mellanox Flexboot PXE boot ROM
+			 */
+			if (!strcmp(param->name, FIRSTBURSTLENGTH))
+				SET_PSTATE_REPLY_OPTIONAL(param);
+
+			/*
+			 * Required for gPXE iSCSI boot client
+			 */
+			if (!strcmp(param->name, MAXCONNECTIONS))
+				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
 	} else if (IS_PHASE_DECLARATIVE(param))
 		SET_PSTATE_REPLY_OPTIONAL(param);
 }
@@ -1422,7 +1435,8 @@
 	u8 sender,
 	char *textbuf,
 	u32 *length,
-	struct iscsi_param_list *param_list)
+	struct iscsi_param_list *param_list,
+	bool keys_workaround)
 {
 	char *output_buf = NULL;
 	struct iscsi_extra_response *er;
@@ -1458,7 +1472,8 @@
 			*length += 1;
 			output_buf = textbuf + *length;
 			SET_PSTATE_PROPOSER(param);
-			iscsi_check_proposer_for_optional_reply(param);
+			iscsi_check_proposer_for_optional_reply(param,
+							        keys_workaround);
 			pr_debug("Sending key: %s=%s\n",
 				param->name, param->value);
 		}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a0751e3..17a58c2 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -40,7 +40,7 @@
 extern int iscsi_update_param_value(struct iscsi_param *, char *);
 extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
 extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
-			struct iscsi_param_list *);
+			struct iscsi_param_list *, bool);
 extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
 extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
 			struct iscsi_param_list *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 205a509..63e1dcc 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -227,6 +227,7 @@
 	a->t10_pi = TA_DEFAULT_T10_PI;
 	a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
 	a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+	a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
 }
 
 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -899,3 +900,21 @@
 
 	return 0;
 }
+
+int iscsit_ta_login_keys_workaround(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->login_keys_workaround = flag;
+	pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
+		tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
+
+	return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 2da2119..901a712 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@
 extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
 
 #endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 1f9bfa4..e8a1f5c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -753,6 +753,15 @@
 	if (cmd->transport_state & CMD_T_ABORTED ||
 	    cmd->transport_state & CMD_T_STOP) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+		/*
+		 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
+		 * release se_device->caw_sem obtained by sbc_compare_and_write()
+		 * since target_complete_ok_work() or target_complete_failure_work()
+		 * won't be called to invoke the normal CAW completion callbacks.
+		 */
+		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+			up(&dev->caw_sem);
+		}
 		complete_all(&cmd->t_transport_stop_comp);
 		return;
 	} else if (!success) {
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index cd5bde3..09f7f20 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -205,8 +205,10 @@
 	mutex_lock(&cooling_list_lock);
 	list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
 		if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+			unsigned long level = get_level(cpufreq_dev, freq);
+
 			mutex_unlock(&cooling_list_lock);
-			return get_level(cpufreq_dev, freq);
+			return level;
 		}
 	}
 	mutex_unlock(&cooling_list_lock);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 83905ff..7e98927 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -104,8 +104,6 @@
 		return -EINVAL;
 	}
 
-	pdev->dev.of_node = pdev->dev.parent->of_node;
-
 	mtherm->dev = &pdev->dev;
 	mtherm->rmap = dev_get_regmap(pdev->dev.parent, NULL);
 	if (!mtherm->rmap) {
@@ -113,6 +111,14 @@
 		return -ENODEV;
 	}
 
+	/*
+	 * Drop any current reference to a device-tree node and get a
+	 * reference to the parent's node which will be balanced on reprobe or
+	 * on platform-device release.
+	 */
+	of_node_put(pdev->dev.of_node);
+	pdev->dev.of_node = of_node_get(pdev->dev.parent->of_node);
+
 	mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
 				mtherm, &max77620_thermal_ops);
 	if (IS_ERR(mtherm->tz_device)) {
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index a04ddae..fe0a7c7 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -160,14 +160,18 @@
 	for (i = 0; i < TSENS_MAX_SENSORS; i++) {
 		tmdev->sensor[i].tmdev = tmdev;
 		tmdev->sensor[i].hw_id = i;
-		tmdev->sensor[i].tzd =
-			devm_thermal_zone_of_sensor_register(
-			&tmdev->pdev->dev, i,
-			&tmdev->sensor[i], &tsens_tm_thermal_zone_ops);
-		if (IS_ERR(tmdev->sensor[i].tzd)) {
-			pr_debug("Error registering sensor:%d\n", i);
-			sensor_missing++;
-			continue;
+		if (tmdev->ops->sensor_en(tmdev, i)) {
+			tmdev->sensor[i].tzd =
+				devm_thermal_zone_of_sensor_register(
+				&tmdev->pdev->dev, i,
+				&tmdev->sensor[i], &tsens_tm_thermal_zone_ops);
+			if (IS_ERR(tmdev->sensor[i].tzd)) {
+				pr_debug("Error registering sensor:%d\n", i);
+				sensor_missing++;
+				continue;
+			}
+		} else {
+			pr_debug("Sensor not enabled:%d\n", i);
 		}
 	}
 
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 770b982..a695d57 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -91,6 +91,7 @@
 	int (*set_trips)(struct tsens_sensor *, int, int);
 	int (*interrupts_reg)(struct tsens_device *);
 	int (*dbg)(struct tsens_device *, u32, u32, int *);
+	int (*sensor_en)(struct tsens_device *, u32);
 };
 
 struct tsens_irqs {
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 55be2f9..de9f27f 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -61,6 +61,7 @@
 #define TSENS_TM_WATCHDOG_LOG(n)		((n) + 0x13c)
 
 #define TSENS_EN				BIT(0)
+#define TSENS_CTRL_SENSOR_EN_MASK(n)		(((n) >> 3) & 0xffff)
 
 static void msm_tsens_convert_temp(int last_temp, int *temp)
 {
@@ -499,6 +500,21 @@
 	return IRQ_HANDLED;
 }
 
+static int tsens2xxx_hw_sensor_en(struct tsens_device *tmdev,
+					u32 sensor_id)
+{
+	void __iomem *srot_addr;
+	unsigned int srot_val, sensor_en;
+
+	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+	srot_val = readl_relaxed(srot_addr);
+	srot_val = TSENS_CTRL_SENSOR_EN_MASK(srot_val);
+
+	sensor_en = ((1 << sensor_id) & srot_val);
+
+	return sensor_en;
+}
+
 static int tsens2xxx_hw_init(struct tsens_device *tmdev)
 {
 	void __iomem *srot_addr;
@@ -602,6 +618,7 @@
 	.set_trips	= tsens2xxx_set_trip_temp,
 	.interrupts_reg	= tsens2xxx_register_interrupts,
 	.dbg		= tsens2xxx_dbg,
+	.sensor_en	= tsens2xxx_hw_sensor_en,
 };
 
 const struct tsens_data data_tsens2xxx = {
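
A worked example of the enable check above (the register value is illustrative, not read from hardware): bits [18:3] of the SROT control word carry the per-sensor enable bits, so a raw value of 0x1FF8 enables sensors 0 through 9 and leaves sensor 12 disabled.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int srot_val = 0x1FF8;			/* example register value */
	unsigned int mask = (srot_val >> 3) & 0xffff;	/* TSENS_CTRL_SENSOR_EN_MASK */
	bool sensor9_en  = (1u << 9)  & mask;		/* 1: zone gets registered */
	bool sensor12_en = (1u << 12) & mask;		/* 0: "Sensor not enabled" path */

	printf("sensor9=%d sensor12=%d\n", sensor9_en, sensor12_en);
	return 0;
}
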
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8c3bf3d..ce2c3c6 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2711,13 +2711,13 @@
 	 * related to the kernel should not use this.
 	 */
 			data = vt_get_shift_state();
-			ret = __put_user(data, p);
+			ret = put_user(data, p);
 			break;
 		case TIOCL_GETMOUSEREPORTING:
 			console_lock();	/* May be overkill */
 			data = mouse_reporting();
 			console_unlock();
-			ret = __put_user(data, p);
+			ret = put_user(data, p);
 			break;
 		case TIOCL_SETVESABLANK:
 			console_lock();
@@ -2726,7 +2726,7 @@
 			break;
 		case TIOCL_GETKMSGREDIRECT:
 			data = vt_get_kmsg_redirect();
-			ret = __put_user(data, p);
+			ret = put_user(data, p);
 			break;
 		case TIOCL_SETKMSGREDIRECT:
 			if (!capable(CAP_SYS_ADMIN)) {
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index a876d47..f16491c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1770,6 +1770,9 @@
 	{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
 	},
+	{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+	},
 
 	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
 	.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index eef716b..3fd2b54 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -981,6 +981,15 @@
 		case USB_PTM_CAP_TYPE:
 			dev->bos->ptm_cap =
 				(struct usb_ptm_cap_descriptor *)buffer;
+			break;
+		case USB_CAP_TYPE_CONFIG_SUMMARY:
+			/* one such desc per configuration */
+			if (!dev->bos->num_config_summary_desc)
+				dev->bos->config_summary =
+				(struct usb_config_summary_descriptor *)buffer;
+
+			dev->bos->num_config_summary_desc++;
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8d..0f10ff2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -19,6 +19,8 @@
 
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v3.h>
 #include "usb.h"
 
 static inline const char *plural(int n)
@@ -40,6 +42,34 @@
 		&& desc->bInterfaceProtocol == 1;
 }
 
+static int usb_audio_max_rev_config(struct usb_host_bos *bos)
+{
+	int desc_cnt, func_cnt, numfunc;
+	int num_cfg_desc;
+	struct usb_config_summary_descriptor *conf_summary;
+
+	if (!bos || !bos->config_summary)
+		goto done;
+
+	conf_summary = bos->config_summary;
+	num_cfg_desc = bos->num_config_summary_desc;
+
+	for (desc_cnt = 0; desc_cnt < num_cfg_desc; desc_cnt++) {
+		numfunc = conf_summary->bNumFunctions;
+		for (func_cnt = 0; func_cnt < numfunc; func_cnt++) {
+			/* honor device preferred config */
+			if (conf_summary->cs_info[func_cnt].bClass ==
+				USB_CLASS_AUDIO &&
+				conf_summary->cs_info[func_cnt].bSubClass !=
+				FULL_ADC_3_0)
+				return conf_summary->bConfigurationValue;
+		}
+	}
+
+done:
+	return -EINVAL;
+}
+
 int usb_choose_configuration(struct usb_device *udev)
 {
 	int i;
@@ -130,7 +160,6 @@
 			best = c;
 			break;
 		}
-
 		/* If all the remaining configs are vendor-specific,
 		 * choose the first one. */
 		else if (!best)
@@ -143,7 +172,10 @@
 			insufficient_power, plural(insufficient_power));
 
 	if (best) {
-		i = best->desc.bConfigurationValue;
+		/* choose usb audio class preferred config if available */
+		i = usb_audio_max_rev_config(udev->bos);
+		if (i < 0)
+			i = best->desc.bConfigurationValue;
 		dev_dbg(&udev->dev,
 			"configuration #%d chosen from %d choice%s\n",
 			i, num_configs, plural(num_configs));
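
The selection rule above can be condensed into a small predicate (illustrative only; the descriptor field names are the ones introduced in the hunk): a configuration-summary entry overrides the usual best-config heuristic when it advertises an audio function whose subclass is not full ADC 3.0.

static bool demo_prefers_config(const struct usb_config_summary_descriptor *cs,
				int func)
{
	return cs->cs_info[func].bClass == USB_CLASS_AUDIO &&
	       cs->cs_info[func].bSubClass != FULL_ADC_3_0;
}
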
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 3e9d233..0dc81d2 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2912,6 +2912,7 @@
 		}
 	}
 
+	edev = NULL;
 	/* Use third phandle (optional) for EUD based detach/attach events */
 	if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
 		edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
@@ -2921,7 +2922,7 @@
 		}
 	}
 
-	if (!IS_ERR(edev)) {
+	if (!IS_ERR_OR_NULL(edev)) {
 		mdwc->extcon_eud = edev;
 		mdwc->eud_event_nb.notifier_call = dwc3_msm_eud_notifier;
 		ret = extcon_register_notifier(edev, EXTCON_USB,
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
index 51ab794..1590927 100644
--- a/drivers/usb/gadget/function/f_audio_source.c
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -1029,8 +1029,14 @@
 	config_group_init_type_name(&fi_audio->func_inst.group, "",
 						&audio_source_func_type);
 
-	snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+	if (!count) {
+		snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
+					"f_audio_source");
+		count++;
+	} else {
+		snprintf(device_name, AUDIO_SOURCE_DEV_NAME_LENGTH,
 					"f_audio_source%d", count++);
+	}
 
 	dev = create_function_device(device_name);
 
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9a1e4c..c8989c6 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -77,6 +77,16 @@
 #define USB_INTEL_USB3_PSSEN   0xD8
 #define USB_INTEL_USB3PRM      0xDC
 
+/* ASMEDIA quirk use */
+#define ASMT_DATA_WRITE0_REG	0xF8
+#define ASMT_DATA_WRITE1_REG	0xFC
+#define ASMT_CONTROL_REG	0xE0
+#define ASMT_CONTROL_WRITE_BIT	0x02
+#define ASMT_WRITEREG_CMD	0x10423
+#define ASMT_FLOWCTL_ADDR	0xFA30
+#define ASMT_FLOWCTL_DATA	0xBA
+#define ASMT_PSEUDO_DATA	0
+
 /*
  * amd_chipset_gen values represent AMD different chipset generations
  */
@@ -412,6 +422,50 @@
 }
 EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
 
+static int usb_asmedia_wait_write(struct pci_dev *pdev)
+{
+	unsigned long retry_count;
+	unsigned char value;
+
+	for (retry_count = 1000; retry_count > 0; --retry_count) {
+
+		pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
+
+		if (value == 0xff) {
+			dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
+			return -EIO;
+		}
+
+		if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
+			return 0;
+
+		usleep_range(40, 60);
+	}
+
+	dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
+	return -ETIMEDOUT;
+}
+
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
+{
+	if (usb_asmedia_wait_write(pdev) != 0)
+		return;
+
+	/* send command and address to device */
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
+	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+
+	if (usb_asmedia_wait_write(pdev) != 0)
+		return;
+
+	/* send data to device */
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
+	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
+	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
+}
+EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
+
 void usb_amd_quirk_pll_enable(void)
 {
 	usb_amd_quirk_pll(0);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index c622ddf..6463fdb 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -11,6 +11,7 @@
 void usb_amd_dev_put(void);
 void usb_amd_quirk_pll_disable(void);
 void usb_amd_quirk_pll_enable(void);
+void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
 void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
@@ -18,6 +19,7 @@
 struct pci_dev;
 static inline void usb_amd_quirk_pll_disable(void) {}
 static inline void usb_amd_quirk_pll_enable(void) {}
+static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
 static inline void usb_amd_dev_put(void) {}
 static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
 static inline void sb800_prefetch(struct device *dev, int on) {}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index b0c4f12..e9a8c3f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -798,6 +798,9 @@
 			clear_bit(wIndex, &bus_state->resuming_ports);
 
 			set_bit(wIndex, &bus_state->rexit_ports);
+
+			xhci_test_and_clear_bit(xhci, port_array, wIndex,
+						PORT_PLC);
 			xhci_set_link_state(xhci, port_array, wIndex,
 					XDEV_U0);
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 672751e..2383344 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,8 @@
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2			0x43bb
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_1			0x43bc
 
+#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI		0x1142
+
 static const char hcd_name[] = "xhci_hcd";
 
 static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -217,6 +219,10 @@
 			pdev->device == 0x1142)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
+	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+		pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
+		xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
+
 	if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
 		xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5d434e0..e185bbe 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -860,13 +860,16 @@
 			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
 		int stream_id;
 
-		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
 				stream_id++) {
+			ring = ep->stream_info->stream_rings[stream_id];
+			if (!ring)
+				continue;
+
 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 					"Killing URBs for slot ID %u, ep index %u, stream %u",
-					slot_id, ep_index, stream_id + 1);
-			xhci_kill_ring_urbs(xhci,
-					ep->stream_info->stream_rings[stream_id]);
+					slot_id, ep_index, stream_id);
+			xhci_kill_ring_urbs(xhci, ring);
 		}
 	} else {
 		ring = ep->ring;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ec9ff3e..15bf308 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -201,6 +201,9 @@
 	if (ret)
 		return ret;
 
+	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
+
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			 "Wait for controller to be ready for doorbell rings");
 	/*
@@ -1134,6 +1137,9 @@
 	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
 		compliance_mode_recovery_timer_init(xhci);
 
+	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
+		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
+
 	/* Re-enable port polling. */
 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3a7fb29..757d045 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1669,6 +1669,7 @@
 #define XHCI_BROKEN_PORT_PED	(1 << 25)
 #define XHCI_LIMIT_ENDPOINT_INTERVAL_7	(1 << 26)
 #define XHCI_U2_DISABLE_WAKE	(1 << 27)
+#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL	(1 << 28)
 
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 1fad512..141b916 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1432,6 +1432,7 @@
 static void dr_swap(struct usbpd *pd)
 {
 	reset_vdm_state(pd);
+	usbpd_dbg(&pd->dev, "dr_swap: current_dr(%d)\n", pd->current_dr);
 
 	if (pd->current_dr == DR_DFP) {
 		stop_usb_host(pd);
@@ -1439,9 +1440,9 @@
 		pd->current_dr = DR_UFP;
 	} else if (pd->current_dr == DR_UFP) {
 		stop_usb_peripheral(pd);
+		start_usb_host(pd, true);
 		pd->current_dr = DR_DFP;
 
-		/* don't start USB host until after SVDM discovery */
 		usbpd_send_svdm(pd, USBPD_SID, USBPD_SVDM_DISCOVER_IDENTITY,
 				SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
 	}
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 012a37a..7994208 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -752,8 +752,10 @@
 	struct usbhs_priv *priv = dev_get_drvdata(dev);
 	struct platform_device *pdev = usbhs_priv_to_pdev(priv);
 
-	if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+	if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
 		usbhsc_power_ctrl(priv, 1);
+		usbhs_mod_autonomy_mode(priv);
+	}
 
 	usbhs_platform_call(priv, phy_reset, pdev);
 
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 5bc7a61..93fba90 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -37,6 +37,7 @@
 struct usbhsg_uep {
 	struct usb_ep		 ep;
 	struct usbhs_pipe	*pipe;
+	spinlock_t		lock;	/* protect the pipe */
 
 	char ep_name[EP_NAME_SIZE];
 
@@ -636,10 +637,16 @@
 static int usbhsg_ep_disable(struct usb_ep *ep)
 {
 	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+	struct usbhs_pipe *pipe;
+	unsigned long flags;
+	int ret = 0;
 
-	if (!pipe)
-		return -EINVAL;
+	spin_lock_irqsave(&uep->lock, flags);
+	pipe = usbhsg_uep_to_pipe(uep);
+	if (!pipe) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	usbhsg_pipe_disable(uep);
 	usbhs_pipe_free(pipe);
@@ -647,6 +654,9 @@
 	uep->pipe->mod_private	= NULL;
 	uep->pipe		= NULL;
 
+out:
+	spin_unlock_irqrestore(&uep->lock, flags);
+
 	return 0;
 }
 
@@ -696,8 +706,11 @@
 {
 	struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
-	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+	struct usbhs_pipe *pipe;
+	unsigned long flags;
 
+	spin_lock_irqsave(&uep->lock, flags);
+	pipe = usbhsg_uep_to_pipe(uep);
 	if (pipe)
 		usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
 
@@ -706,6 +719,7 @@
 	 * even if the pipe is NULL.
 	 */
 	usbhsg_queue_pop(uep, ureq, -ECONNRESET);
+	spin_unlock_irqrestore(&uep->lock, flags);
 
 	return 0;
 }
@@ -852,10 +866,10 @@
 {
 	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
-	struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+	struct usbhsg_uep *uep;
 	struct device *dev = usbhs_priv_to_dev(priv);
 	unsigned long flags;
-	int ret = 0;
+	int ret = 0, i;
 
 	/********************  spin lock ********************/
 	usbhs_lock(priv, flags);
@@ -887,7 +901,9 @@
 	usbhs_sys_set_test_mode(priv, 0);
 	usbhs_sys_function_ctrl(priv, 0);
 
-	usbhsg_ep_disable(&dcp->ep);
+	/* disable all eps */
+	usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
+		usbhsg_ep_disable(&uep->ep);
 
 	dev_dbg(dev, "stop gadget\n");
 
@@ -1069,6 +1085,7 @@
 		ret = -ENOMEM;
 		goto usbhs_mod_gadget_probe_err_gpriv;
 	}
+	spin_lock_init(&uep->lock);
 
 	gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
 	dev_info(dev, "%stransceiver found\n",
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index fba4005..6a7720e 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1529,8 +1529,11 @@
 
 	/* Make sure driver was initialized */
 
-	if (us->extra == NULL)
+	if (us->extra == NULL) {
 		usb_stor_dbg(us, "ERROR Driver not initialized\n");
+		srb->result = DID_ERROR << 16;
+		return;
+	}
 
 	scsi_set_resid(srb, 0);
 	/* scsi_bufflen might change in protocol translation to ata */
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index d1d70e0..881fc3a 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -419,6 +419,34 @@
 	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
 }
 
+struct vfio_group_put_work {
+	struct work_struct work;
+	struct vfio_group *group;
+};
+
+static void vfio_group_put_bg(struct work_struct *work)
+{
+	struct vfio_group_put_work *do_work;
+
+	do_work = container_of(work, struct vfio_group_put_work, work);
+
+	vfio_group_put(do_work->group);
+	kfree(do_work);
+}
+
+static void vfio_group_schedule_put(struct vfio_group *group)
+{
+	struct vfio_group_put_work *do_work;
+
+	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
+	if (WARN_ON(!do_work))
+		return;
+
+	INIT_WORK(&do_work->work, vfio_group_put_bg);
+	do_work->group = group;
+	schedule_work(&do_work->work);
+}
+
 /* Assume group_lock or group reference is held */
 static void vfio_group_get(struct vfio_group *group)
 {
@@ -743,7 +771,14 @@
 		break;
 	}
 
-	vfio_group_put(group);
+	/*
+	 * If we're the last reference to the group, the group will be
+	 * released, which includes unregistering the iommu group notifier.
+	 * We hold a read-lock on that notifier list, unregistering needs
+	 * a write-lock... deadlock.  Release our reference asynchronously
+	 * to avoid that situation.
+	 */
+	vfio_group_schedule_put(group);
 	return NOTIFY_OK;
 }
 
@@ -1716,6 +1751,15 @@
 }
 EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
 
+bool vfio_external_group_match_file(struct vfio_group *test_group,
+				    struct file *filep)
+{
+	struct vfio_group *group = filep->private_data;
+
+	return (filep->f_op == &vfio_group_fops) && (group == test_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+
 int vfio_external_user_iommu_id(struct vfio_group *group)
 {
 	return iommu_group_id(group->iommu_group);
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index d6950e0..980f328 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -134,9 +134,7 @@
 	struct page *pages[VSCSI_MAX_GRANTS];
 
 	struct se_cmd se_cmd;
-};
 
-struct scsiback_tmr {
 	atomic_t tmr_complete;
 	wait_queue_head_t tmr_wait;
 };
@@ -599,26 +597,20 @@
 	struct scsiback_tpg *tpg = pending_req->v2p->tpg;
 	struct scsiback_nexus *nexus = tpg->tpg_nexus;
 	struct se_cmd *se_cmd = &pending_req->se_cmd;
-	struct scsiback_tmr *tmr;
 	u64 unpacked_lun = pending_req->v2p->lun;
 	int rc, err = FAILED;
 
-	tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
-	if (!tmr) {
-		target_put_sess_cmd(se_cmd);
-		goto err;
-	}
-
-	init_waitqueue_head(&tmr->tmr_wait);
+	init_waitqueue_head(&pending_req->tmr_wait);
 
 	rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
 			       &pending_req->sense_buffer[0],
-			       unpacked_lun, tmr, act, GFP_KERNEL,
+			       unpacked_lun, NULL, act, GFP_KERNEL,
 			       tag, TARGET_SCF_ACK_KREF);
 	if (rc)
 		goto err;
 
-	wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
+	wait_event(pending_req->tmr_wait,
+		   atomic_read(&pending_req->tmr_complete));
 
 	err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
 		SUCCESS : FAILED;
@@ -626,9 +618,8 @@
 	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
 	transport_generic_free_cmd(&pending_req->se_cmd, 1);
 	return;
+
 err:
-	if (tmr)
-		kfree(tmr);
 	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
 }
 
@@ -1389,12 +1380,6 @@
 static void scsiback_release_cmd(struct se_cmd *se_cmd)
 {
 	struct se_session *se_sess = se_cmd->se_sess;
-	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-
-	if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
-		struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
-		kfree(tmr);
-	}
 
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
@@ -1455,11 +1440,11 @@
 
 static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
 {
-	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-	struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+	struct vscsibk_pend *pending_req = container_of(se_cmd,
+				struct vscsibk_pend, se_cmd);
 
-	atomic_set(&tmr->tmr_complete, 1);
-	wake_up(&tmr->tmr_wait);
+	atomic_set(&pending_req->tmr_complete, 1);
+	wake_up(&pending_req->tmr_wait);
 }
 
 static void scsiback_aborted_task(struct se_cmd *se_cmd)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index cfd724f..1fdf4e5 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -911,17 +911,60 @@
 		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
 
 		vaddr = elf_ppnt->p_vaddr;
+		/*
+		 * If we are loading ET_EXEC or we have already performed
+		 * the ET_DYN load_addr calculations, proceed normally.
+		 */
 		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
 			elf_flags |= MAP_FIXED;
 		} else if (loc->elf_ex.e_type == ET_DYN) {
-			/* Try and get dynamic programs out of the way of the
-			 * default mmap base, as well as whatever program they
-			 * might try to exec.  This is because the brk will
-			 * follow the loader, and is not movable.  */
-			load_bias = ELF_ET_DYN_BASE - vaddr;
-			if (current->flags & PF_RANDOMIZE)
-				load_bias += arch_mmap_rnd();
-			load_bias = ELF_PAGESTART(load_bias);
+			/*
+			 * This logic is run once for the first LOAD Program
+			 * Header for ET_DYN binaries to calculate the
+			 * randomization (load_bias) for all the LOAD
+			 * Program Headers, and to calculate the entire
+			 * size of the ELF mapping (total_size). (Note that
+			 * load_addr_set is set to true later once the
+			 * initial mapping is performed.)
+			 *
+			 * There are effectively two types of ET_DYN
+			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+			 * and loaders (ET_DYN without INTERP, since they
+			 * _are_ the ELF interpreter). The loaders must
+			 * be loaded away from programs since the program
+			 * may otherwise collide with the loader (especially
+			 * for ET_EXEC which does not have a randomized
+			 * position). For example to handle invocations of
+			 * "./ld.so someprog" to test out a new version of
+			 * the loader, the subsequent program that the
+			 * loader loads must avoid the loader itself, so
+			 * they cannot share the same load range. Sufficient
+			 * room for the brk must be allocated with the
+			 * loader as well, since brk must be available with
+			 * the loader.
+			 *
+			 * Therefore, programs are loaded offset from
+			 * ELF_ET_DYN_BASE and loaders are loaded into the
+			 * independently randomized mmap region (0 load_bias
+			 * without MAP_FIXED).
+			 */
+			if (elf_interpreter) {
+				load_bias = ELF_ET_DYN_BASE;
+				if (current->flags & PF_RANDOMIZE)
+					load_bias += arch_mmap_rnd();
+				elf_flags |= MAP_FIXED;
+			} else
+				load_bias = 0;
+
+			/*
+			 * Since load_bias is used for all subsequent loading
+			 * calculations, we must lower it by the first vaddr
+			 * so that the remaining calculations based on the
+			 * ELF vaddrs will be correctly offset. The result
+			 * is then page aligned.
+			 */
+			load_bias = ELF_PAGESTART(load_bias - vaddr);
+
 			total_size = total_mapping_size(elf_phdata,
 							loc->elf_ex.e_phnum);
 			if (!total_size) {
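
As a side note on the arithmetic in the comment above, the following minimal standalone C sketch walks through the load_bias computation; ELF_ET_DYN_BASE, the random offset and the p_vaddr are made-up example values, and ELF_PAGESTART is reproduced assuming a 4 KiB page.

#include <stdio.h>
#include <stdint.h>

#define ELF_MIN_ALIGN    4096ULL
#define ELF_PAGESTART(x) ((x) & ~(ELF_MIN_ALIGN - 1))

int main(void)
{
	uint64_t elf_et_dyn_base = 0x555555554000ULL;	/* hypothetical ELF_ET_DYN_BASE */
	uint64_t rnd             = 0x1a3000ULL;		/* hypothetical arch_mmap_rnd() */
	uint64_t vaddr           = 0x200ULL;		/* hypothetical first p_vaddr */
	uint64_t loader_vaddr    = 0;			/* loaders typically start at p_vaddr 0 */
	uint64_t load_bias;

	/* PIE (ET_DYN with an interpreter): fixed, randomized base. */
	load_bias = ELF_PAGESTART(elf_et_dyn_base + rnd - vaddr);
	printf("PIE    load_bias = %#llx\n", (unsigned long long)load_bias);

	/*
	 * Loader (ET_DYN without an interpreter): load_bias starts at 0, so
	 * with the usual page-aligned first p_vaddr it stays 0 and mmap picks
	 * an independently randomized address because MAP_FIXED is not set.
	 */
	load_bias = ELF_PAGESTART(0 - loader_vaddr);
	printf("loader load_bias = %#llx\n", (unsigned long long)load_bias);
	return 0;
}
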
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 247b8df..8d8370d 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -78,12 +78,6 @@
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name = XATTR_NAME_POSIX_ACL_ACCESS;
-		if (acl) {
-			ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-			if (ret)
-				return ret;
-		}
-		ret = 0;
 		break;
 	case ACL_TYPE_DEFAULT:
 		if (!S_ISDIR(inode->i_mode))
@@ -119,6 +113,13 @@
 
 int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
+	int ret;
+
+	if (type == ACL_TYPE_ACCESS && acl) {
+		ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+		if (ret)
+			return ret;
+	}
 	return __btrfs_set_acl(NULL, inode, acl, type);
 }
 
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index aca0d88..cec2569 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -292,6 +292,11 @@
 		if (ret < 0)
 			err = ret;
 		dput(last);
+		/* last_name no longer matches the cache index */
+		if (fi->readdir_cache_idx >= 0) {
+			fi->readdir_cache_idx = -1;
+			fi->dir_release_count = 0;
+		}
 	}
 	return err;
 }
diff --git a/fs/dcache.c b/fs/dcache.c
index 362396a..7171f0d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1133,11 +1133,12 @@
 		LIST_HEAD(dispose);
 
 		freed = list_lru_walk(&sb->s_dentry_lru,
-			dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+			dentry_lru_isolate_shrink, &dispose, 1024);
 
 		this_cpu_sub(nr_dentry_unused, freed);
 		shrink_dentry_list(&dispose);
-	} while (freed > 0);
+		cond_resched();
+	} while (list_lru_count(&sb->s_dentry_lru) > 0);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
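
The loop above now drains the dentry LRU in bounded batches with a reschedule point, instead of isolating up to UINT_MAX entries in one pass. The shape of that control flow is shown in isolation in the standalone sketch below; a toy counter stands in for the dentry LRU and list_lru_count(), and the batch size of 1024 mirrors the hunk.

#include <stdio.h>

static long lru_count = 5000;		/* stand-in for list_lru_count() */

static long isolate_batch(long max)	/* stand-in for list_lru_walk(..., 1024) */
{
	long n = lru_count < max ? lru_count : max;

	lru_count -= n;
	return n;
}

int main(void)
{
	while (lru_count > 0) {
		long freed = isolate_batch(1024);

		printf("freed %ld, %ld left\n", freed, lru_count);
		/* cond_resched() would sit here in kernel context */
	}
	return 0;
}
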
 
diff --git a/fs/exec.c b/fs/exec.c
index 26ab263..3e2de29 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -215,8 +215,7 @@
 
 	if (write) {
 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
-		unsigned long ptr_size;
-		struct rlimit *rlim;
+		unsigned long ptr_size, limit;
 
 		/*
 		 * Since the stack will hold pointers to the strings, we
@@ -245,14 +244,16 @@
 			return page;
 
 		/*
-		 * Limit to 1/4-th the stack size for the argv+env strings.
+		 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+		 * (whichever is smaller) for the argv+env strings.
 		 * This ensures that:
 		 *  - the remaining binfmt code will not run out of stack space,
 		 *  - the program will have a reasonable amount of stack left
 		 *    to work from.
 		 */
-		rlim = current->signal->rlim;
-		if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+		limit = _STK_LIM / 4 * 3;
+		limit = min(limit, rlimit(RLIMIT_STACK) / 4);
+		if (size > limit)
 			goto fail;
 	}
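
A standalone check of the new limit formula, assuming the usual 8 MiB _STK_LIM and an arbitrary example RLIMIT_STACK:

#include <stdio.h>

#define _STK_LIM	(8UL * 1024 * 1024)	/* common default */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long stack_rlim = 64UL * 1024 * 1024;	/* example RLIMIT_STACK */

	/* Old rule: 1/4 of RLIMIT_STACK, unbounded for huge rlimits. */
	unsigned long old_limit = stack_rlim / 4;

	/* New rule: additionally capped at 3/4 of _STK_LIM (6 MiB here). */
	unsigned long new_limit = min_ul(_STK_LIM / 4 * 3, stack_rlim / 4);

	printf("old limit: %lu bytes\n", old_limit);
	printf("new limit: %lu bytes\n", new_limit);
	return 0;
}
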
 
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 79dafa7..069c0dc 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -175,11 +175,8 @@
 	return acl;
 }
 
-/*
- * inode->i_mutex: down
- */
-int
-ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+static int
+__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	int name_index;
 	void *value = NULL;
@@ -189,13 +186,6 @@
 	switch(type) {
 		case ACL_TYPE_ACCESS:
 			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
-			if (acl) {
-				error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-				if (error)
-					return error;
-				inode->i_ctime = current_time(inode);
-				mark_inode_dirty(inode);
-			}
 			break;
 
 		case ACL_TYPE_DEFAULT:
@@ -222,6 +212,24 @@
 }
 
 /*
+ * inode->i_mutex: down
+ */
+int
+ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+	int error;
+
+	if (type == ACL_TYPE_ACCESS && acl) {
+		error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+		if (error)
+			return error;
+		inode->i_ctime = current_time(inode);
+		mark_inode_dirty(inode);
+	}
+	return __ext2_set_acl(inode, acl, type);
+}
+
+/*
  * Initialize the ACLs of a new inode. Called from ext2_new_inode.
  *
  * dir->i_mutex: down
@@ -238,12 +246,12 @@
 		return error;
 
 	if (default_acl) {
-		error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+		error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
 		posix_acl_release(default_acl);
 	}
 	if (acl) {
 		if (!error)
-			error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+			error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
 		posix_acl_release(acl);
 	}
 	return error;
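
The btrfs and ext2 hunks above, and the hfsplus and reiserfs hunks further down, all apply the same refactoring: posix_acl_update_mode() is hoisted out of the shared setter into the externally visible entry point, so ACL inheritance at inode creation (which already passes a fully formed mode) no longer goes through it. A schematic sketch of the split, using hypothetical "myfs" names and a placeholder myfs_write_acl_xattr() helper, looks like this:

/* Schematic only; "myfs" is not a real filesystem. */
static int __myfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	/* Store the ACL xattr; inode->i_mode is never touched here, which is
	 * exactly what the create-time inheritance path wants. */
	return myfs_write_acl_xattr(inode, acl, type);
}

int myfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	int error;

	/* Only the user-visible setter recomputes the mode bits. */
	if (type == ACL_TYPE_ACCESS && acl) {
		error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
		if (error)
			return error;
	}
	return __myfs_set_acl(inode, acl, type);
}
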
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 6fe23af..55aa29c 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -211,7 +211,7 @@
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
-		if (acl) {
+		if (acl && !ipage) {
 			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
 			if (error)
 				return error;
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 60da84a..5d75cc4 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -5,4 +5,4 @@
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
 
-fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o
+fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o passthrough.o
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 83511cb..658fa9e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -7,6 +7,7 @@
 */
 
 #include "fuse_i.h"
+#include "fuse_passthrough.h"
 
 #include <linux/init.h>
 #include <linux/module.h>
@@ -550,9 +551,14 @@
 	       args->out.numargs * sizeof(struct fuse_arg));
 	fuse_request_send(fc, req);
 	ret = req->out.h.error;
-	if (!ret && args->out.argvar) {
-		BUG_ON(args->out.numargs != 1);
-		ret = req->out.args[0].size;
+	if (!ret) {
+		if (args->out.argvar) {
+			WARN_ON(args->out.numargs != 1);
+			ret = req->out.args[0].size;
+		}
+
+		if (req->passthrough_filp != NULL)
+			args->out.passthrough_filp = req->passthrough_filp;
 	}
 	fuse_put_request(fc, req);
 
@@ -1890,6 +1896,9 @@
 	}
 	fuse_copy_finish(cs);
 
+	fuse_setup_passthrough(fc, req);
+
+
 	spin_lock(&fpq->lock);
 	clear_bit(FR_LOCKED, &req->flags);
 	if (!fpq->connected)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fc8ba62..c7c3c96 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -491,6 +491,7 @@
 	args.out.args[0].value = &outentry;
 	args.out.args[1].size = sizeof(outopen);
 	args.out.args[1].value = &outopen;
+	args.out.passthrough_filp = NULL;
 	err = fuse_simple_request(fc, &args);
 	if (err)
 		goto out_free_ff;
@@ -502,6 +503,8 @@
 	ff->fh = outopen.fh;
 	ff->nodeid = outentry.nodeid;
 	ff->open_flags = outopen.open_flags;
+	if (args.out.passthrough_filp != NULL)
+		ff->passthrough_filp = args.out.passthrough_filp;
 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
 			  &outentry.attr, entry_attr_timeout(&outentry), 0);
 	if (!inode) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 5ec5870..75c95659 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -7,6 +7,7 @@
 */
 
 #include "fuse_i.h"
+#include "fuse_passthrough.h"
 
 #include <linux/pagemap.h>
 #include <linux/slab.h>
@@ -21,8 +22,10 @@
 static const struct file_operations fuse_direct_io_file_operations;
 
 static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
-			  int opcode, struct fuse_open_out *outargp)
+			  int opcode, struct fuse_open_out *outargp,
+			  struct file **passthrough_filpp)
 {
+	int ret_val;
 	struct fuse_open_in inarg;
 	FUSE_ARGS(args);
 
@@ -38,8 +41,14 @@
 	args.out.numargs = 1;
 	args.out.args[0].size = sizeof(*outargp);
 	args.out.args[0].value = outargp;
+	args.out.passthrough_filp = NULL;
 
-	return fuse_simple_request(fc, &args);
+	ret_val = fuse_simple_request(fc, &args);
+
+	if (args.out.passthrough_filp != NULL)
+		*passthrough_filpp = args.out.passthrough_filp;
+
+	return ret_val;
 }
 
 struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
@@ -50,6 +59,11 @@
 	if (unlikely(!ff))
 		return NULL;
 
+	ff->passthrough_filp = NULL;
+	ff->passthrough_enabled = 0;
+	if (fc->passthrough)
+		ff->passthrough_enabled = 1;
+
 	ff->fc = fc;
 	ff->reserved_req = fuse_request_alloc(0);
 	if (unlikely(!ff->reserved_req)) {
@@ -118,6 +132,7 @@
 		 bool isdir)
 {
 	struct fuse_file *ff;
+	struct file *passthrough_filp = NULL;
 	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
 
 	ff = fuse_file_alloc(fc);
@@ -130,11 +145,12 @@
 		struct fuse_open_out outarg;
 		int err;
 
-		err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
+		err = fuse_send_open(fc, nodeid, file, opcode, &outarg,
+				     &passthrough_filp);
 		if (!err) {
 			ff->fh = outarg.fh;
 			ff->open_flags = outarg.open_flags;
-
+			ff->passthrough_filp = passthrough_filp;
 		} else if (err != -ENOSYS || isdir) {
 			fuse_file_free(ff);
 			return err;
@@ -253,6 +269,8 @@
 	if (unlikely(!ff))
 		return;
 
+	fuse_passthrough_release(ff);
+
 	req = ff->reserved_req;
 	fuse_prepare_release(ff, file->f_flags, opcode);
 
@@ -917,8 +935,10 @@
 
 static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+	ssize_t ret_val;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_file *ff = iocb->ki_filp->private_data;
 
 	/*
 	 * In auto invalidate mode, always update attributes on read.
@@ -933,7 +953,12 @@
 			return err;
 	}
 
-	return generic_file_read_iter(iocb, to);
+	if (ff && ff->passthrough_enabled && ff->passthrough_filp)
+		ret_val = fuse_passthrough_read_iter(iocb, to);
+	else
+		ret_val = generic_file_read_iter(iocb, to);
+
+	return ret_val;
 }
 
 static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
@@ -1165,6 +1190,7 @@
 static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
+	struct fuse_file *ff = file->private_data;
 	struct address_space *mapping = file->f_mapping;
 	ssize_t written = 0;
 	ssize_t written_buffered = 0;
@@ -1198,6 +1224,11 @@
 	if (err)
 		goto out;
 
+	if (ff && ff->passthrough_enabled && ff->passthrough_filp) {
+		written = fuse_passthrough_write_iter(iocb, from);
+		goto out;
+	}
+
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		loff_t pos = iocb->ki_pos;
 		written = generic_file_direct_write(iocb, from);
@@ -2069,6 +2100,9 @@
 
 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct fuse_file *ff = file->private_data;
+
+	ff->passthrough_enabled = 0;
 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
 		fuse_link_write_file(file);
 
@@ -2079,6 +2113,9 @@
 
 static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct fuse_file *ff = file->private_data;
+
+	ff->passthrough_enabled = 0;
 	/* Can't provide the coherency needed for MAP_SHARED */
 	if (vma->vm_flags & VM_MAYSHARE)
 		return -ENODEV;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 6b30a12..cc2c82c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -153,6 +153,10 @@
 
 	/** Has flock been performed on this file? */
 	bool flock:1;
+
+	/* underlying file used for read/write passthrough */
+	struct file *passthrough_filp;
+	bool passthrough_enabled;
 };
 
 /** One input argument of a request */
@@ -232,6 +236,7 @@
 		unsigned argvar:1;
 		unsigned numargs;
 		struct fuse_arg args[2];
+		struct file *passthrough_filp;
 	} out;
 };
 
@@ -382,6 +387,9 @@
 
 	/** Request is stolen from fuse_file->reserved_req */
 	struct file *stolen_file;
+
+	/** fuse passthrough file */
+	struct file *passthrough_filp;
 };
 
 struct fuse_iqueue {
@@ -542,6 +550,9 @@
 	/** handle fs handles killing suid/sgid/cap on write/chown/trunc */
 	unsigned handle_killpriv:1;
 
+	/** Passthrough IO was negotiated at INIT time */
+	unsigned passthrough:1;
+
 	/*
 	 * The following bitfields are only for optimization purposes
 	 * and hence races in setting them will not cause malfunction
diff --git a/fs/fuse/fuse_passthrough.h b/fs/fuse/fuse_passthrough.h
new file mode 100644
index 0000000..12429ac
--- /dev/null
+++ b/fs/fuse/fuse_passthrough.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FS_FUSE_PASSTHROUGH_H
+#define _FS_FUSE_PASSTHROUGH_H
+
+#include "fuse_i.h"
+
+#include <linux/fuse.h>
+#include <linux/file.h>
+
+void fuse_setup_passthrough(struct fuse_conn *fc, struct fuse_req *req);
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to);
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+void fuse_passthrough_release(struct fuse_file *ff);
+
+#endif /* _FS_FUSE_PASSTHROUGH_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6fe6a88..f1512c8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -915,6 +915,12 @@
 				fc->parallel_dirops = 1;
 			if (arg->flags & FUSE_HANDLE_KILLPRIV)
 				fc->handle_killpriv = 1;
+			if (arg->flags & FUSE_PASSTHROUGH) {
+				fc->passthrough = 1;
+				/* Prevent further stacking */
+				fc->sb->s_stack_depth =
+					FILESYSTEM_MAX_STACK_DEPTH;
+			}
 			if (arg->time_gran && arg->time_gran <= 1000000000)
 				fc->sb->s_time_gran = arg->time_gran;
 			if ((arg->flags & FUSE_POSIX_ACL)) {
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
new file mode 100644
index 0000000..c92c40b
--- /dev/null
+++ b/fs/fuse/passthrough.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "fuse_passthrough.h"
+
+#include <linux/aio.h>
+#include <linux/fs_stack.h>
+
+void fuse_setup_passthrough(struct fuse_conn *fc, struct fuse_req *req)
+{
+	int daemon_fd, fs_stack_depth;
+	unsigned int open_out_index;
+	struct file *passthrough_filp;
+	struct inode *passthrough_inode;
+	struct super_block *passthrough_sb;
+	struct fuse_open_out *open_out;
+
+	req->passthrough_filp = NULL;
+
+	if (!(fc->passthrough))
+		return;
+
+	if ((req->in.h.opcode != FUSE_OPEN) &&
+	    (req->in.h.opcode != FUSE_CREATE))
+		return;
+
+	open_out_index = req->in.numargs - 1;
+
+	WARN_ON(open_out_index != 0 && open_out_index != 1);
+	WARN_ON(req->out.args[open_out_index].size != sizeof(*open_out));
+
+	open_out = req->out.args[open_out_index].value;
+
+	daemon_fd = (int)open_out->passthrough_fd;
+	if (daemon_fd < 0)
+		return;
+
+	passthrough_filp = fget_raw(daemon_fd);
+	if (!passthrough_filp)
+		return;
+
+	passthrough_inode = file_inode(passthrough_filp);
+	passthrough_sb = passthrough_inode->i_sb;
+	fs_stack_depth = passthrough_sb->s_stack_depth + 1;
+
+	/* If we reached the stacking limit, fall back to regular IO */
+	if (fs_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+		/* Release the passthrough file. */
+		fput(passthrough_filp);
+		pr_err("FUSE: maximum fs stacking depth exceeded, cannot use passthrough for this file\n");
+		return;
+	}
+	req->passthrough_filp = passthrough_filp;
+}
+
+
+static ssize_t fuse_passthrough_read_write_iter(struct kiocb *iocb,
+					    struct iov_iter *iter, int do_write)
+{
+	ssize_t ret_val;
+	struct fuse_file *ff;
+	struct file *fuse_file, *passthrough_filp;
+	struct inode *fuse_inode, *passthrough_inode;
+	struct fuse_conn *fc;
+
+	ff = iocb->ki_filp->private_data;
+	fuse_file = iocb->ki_filp;
+	passthrough_filp = ff->passthrough_filp;
+	fc = ff->fc;
+
+	/* take a reference so the passthrough file cannot be released under us */
+	get_file(passthrough_filp);
+	iocb->ki_filp = passthrough_filp;
+	fuse_inode = fuse_file->f_path.dentry->d_inode;
+	passthrough_inode = file_inode(passthrough_filp);
+
+	if (do_write) {
+		if (!passthrough_filp->f_op->write_iter) {
+			ret_val = -EIO;
+			goto out;
+		}
+
+		ret_val = passthrough_filp->f_op->write_iter(iocb, iter);
+
+		if (ret_val >= 0 || ret_val == -EIOCBQUEUED) {
+			spin_lock(&fc->lock);
+			fsstack_copy_inode_size(fuse_inode, passthrough_inode);
+			spin_unlock(&fc->lock);
+			fsstack_copy_attr_times(fuse_inode, passthrough_inode);
+		}
+	} else {
+		if (!passthrough_filp->f_op->read_iter) {
+			ret_val = -EIO;
+			goto out;
+		}
+
+		ret_val = passthrough_filp->f_op->read_iter(iocb, iter);
+		if (ret_val >= 0 || ret_val == -EIOCBQUEUED)
+			fsstack_copy_attr_atime(fuse_inode, passthrough_inode);
+	}
+
+out:
+	iocb->ki_filp = fuse_file;
+
+	/* drop the reference taken above */
+	fput(passthrough_filp);
+
+	return ret_val;
+}
+
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	return fuse_passthrough_read_write_iter(iocb, to, 0);
+}
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	return fuse_passthrough_read_write_iter(iocb, from, 1);
+}
+
+void fuse_passthrough_release(struct fuse_file *ff)
+{
+	if (!(ff->passthrough_filp))
+		return;
+
+	/* Release the passthrough file. */
+	fput(ff->passthrough_filp);
+	ff->passthrough_filp = NULL;
+}
diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
index 9b92058..6bb5d7c 100644
--- a/fs/hfsplus/posix_acl.c
+++ b/fs/hfsplus/posix_acl.c
@@ -51,8 +51,8 @@
 	return acl;
 }
 
-int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
-		int type)
+static int __hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
+				   int type)
 {
 	int err;
 	char *xattr_name;
@@ -64,12 +64,6 @@
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
-		if (acl) {
-			err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-			if (err)
-				return err;
-		}
-		err = 0;
 		break;
 
 	case ACL_TYPE_DEFAULT:
@@ -105,6 +99,18 @@
 	return err;
 }
 
+int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+	int err;
+
+	if (type == ACL_TYPE_ACCESS && acl) {
+		err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+		if (err)
+			return err;
+	}
+	return __hfsplus_set_posix_acl(inode, acl, type);
+}
+
 int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir)
 {
 	int err = 0;
@@ -122,15 +128,15 @@
 		return err;
 
 	if (default_acl) {
-		err = hfsplus_set_posix_acl(inode, default_acl,
-					    ACL_TYPE_DEFAULT);
+		err = __hfsplus_set_posix_acl(inode, default_acl,
+					      ACL_TYPE_DEFAULT);
 		posix_acl_release(default_acl);
 	}
 
 	if (acl) {
 		if (!err)
-			err = hfsplus_set_posix_acl(inode, acl,
-						    ACL_TYPE_ACCESS);
+			err = __hfsplus_set_posix_acl(inode, acl,
+						      ACL_TYPE_ACCESS);
 		posix_acl_release(acl);
 	}
 	return err;
diff --git a/fs/mount.h b/fs/mount.h
index d8295f2..3603884 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -58,6 +58,7 @@
 	struct mnt_namespace *mnt_ns;	/* containing namespace */
 	struct mountpoint *mnt_mp;	/* where is it mounted */
 	struct hlist_node mnt_mp_list;	/* list mounts with the same mountpoint */
+	struct list_head mnt_umounting; /* list entry for umount propagation */
 #ifdef CONFIG_FSNOTIFY
 	struct hlist_head mnt_fsnotify_marks;
 	__u32 mnt_fsnotify_mask;
diff --git a/fs/namespace.c b/fs/namespace.c
index 5147334..7731f77 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -237,6 +237,7 @@
 		INIT_LIST_HEAD(&mnt->mnt_slave_list);
 		INIT_LIST_HEAD(&mnt->mnt_slave);
 		INIT_HLIST_NODE(&mnt->mnt_mp_list);
+		INIT_LIST_HEAD(&mnt->mnt_umounting);
 #ifdef CONFIG_FSNOTIFY
 		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
 #endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 53e02b8..d04ec381 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1167,11 +1167,13 @@
 	/* Force a full look up iff the parent directory has changed */
 	if (!nfs_is_exclusive_create(dir, flags) &&
 	    nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
-
-		if (nfs_lookup_verify_inode(inode, flags)) {
+		error = nfs_lookup_verify_inode(inode, flags);
+		if (error) {
 			if (flags & LOOKUP_RCU)
 				return -ECHILD;
-			goto out_zap_parent;
+			if (error == -ESTALE)
+				goto out_zap_parent;
+			goto out_error;
 		}
 		goto out_valid;
 	}
@@ -1195,8 +1197,10 @@
 	trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
 	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
 	trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
-	if (error)
+	if (error == -ESTALE || error == -ENOENT)
 		goto out_bad;
+	if (error)
+		goto out_error;
 	if (nfs_compare_fh(NFS_FH(inode), fhandle))
 		goto out_bad;
 	if ((error = nfs_refresh_inode(inode, fattr)) != 0)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index bf4ec5e..76ae256 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1278,9 +1278,9 @@
 		return 0;
 	/* Has the inode gone and changed behind our back? */
 	if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
-		return -EIO;
+		return -ESTALE;
 	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
-		return -EIO;
+		return -ESTALE;
 
 	if (!nfs_file_has_buffered_writers(nfsi)) {
 		/* Verify a few of the more important attributes */
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 0e10085..e7c8ac4 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1146,6 +1146,7 @@
 	unsigned int stacklen = 0;
 	unsigned int i;
 	bool remote = false;
+	struct cred *cred;
 	int err;
 
 	err = -ENOMEM;
@@ -1309,10 +1310,14 @@
 	else
 		sb->s_d_op = &ovl_dentry_operations;
 
-	ufs->creator_cred = prepare_creds();
-	if (!ufs->creator_cred)
+	err = -ENOMEM;
+	ufs->creator_cred = cred = prepare_creds();
+	if (!cred)
 		goto out_put_lower_mnt;
 
+	/* Never override disk quota limits or use reserved space */
+	cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
+
 	err = -ENOMEM;
 	oe = ovl_alloc_entry(numlower);
 	if (!oe)
diff --git a/fs/pnode.c b/fs/pnode.c
index e4e428d..ddb846f 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -24,6 +24,11 @@
 	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
 }
 
+static inline struct mount *last_slave(struct mount *p)
+{
+	return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
+}
+
 static inline struct mount *next_slave(struct mount *p)
 {
 	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -164,6 +169,19 @@
 	}
 }
 
+static struct mount *skip_propagation_subtree(struct mount *m,
+						struct mount *origin)
+{
+	/*
+	 * Advance m such that propagation_next will not return
+	 * the slaves of m.
+	 */
+	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+		m = last_slave(m);
+
+	return m;
+}
+
 static struct mount *next_group(struct mount *m, struct mount *origin)
 {
 	while (1) {
@@ -415,68 +433,107 @@
 	}
 }
 
-/*
- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
- */
-static void mark_umount_candidates(struct mount *mnt)
+static void umount_one(struct mount *mnt, struct list_head *to_umount)
 {
-	struct mount *parent = mnt->mnt_parent;
-	struct mount *m;
-
-	BUG_ON(parent == mnt);
-
-	for (m = propagation_next(parent, parent); m;
-			m = propagation_next(m, parent)) {
-		struct mount *child = __lookup_mnt(&m->mnt,
-						mnt->mnt_mountpoint);
-		if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
-			continue;
-		if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
-			SET_MNT_MARK(child);
-		}
-	}
+	CLEAR_MNT_MARK(mnt);
+	mnt->mnt.mnt_flags |= MNT_UMOUNT;
+	list_del_init(&mnt->mnt_child);
+	list_del_init(&mnt->mnt_umounting);
+	list_move_tail(&mnt->mnt_list, to_umount);
 }
 
 /*
  * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
  * parent propagates to.
  */
-static void __propagate_umount(struct mount *mnt)
+static bool __propagate_umount(struct mount *mnt,
+			       struct list_head *to_umount,
+			       struct list_head *to_restore)
 {
-	struct mount *parent = mnt->mnt_parent;
-	struct mount *m;
+	bool progress = false;
+	struct mount *child;
 
-	BUG_ON(parent == mnt);
+	/*
+	 * The state of the parent won't change if this mount is
+	 * already unmounted or marked as without children.
+	 */
+	if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
+		goto out;
 
-	for (m = propagation_next(parent, parent); m;
-			m = propagation_next(m, parent)) {
-		struct mount *topper;
-		struct mount *child = __lookup_mnt(&m->mnt,
-						mnt->mnt_mountpoint);
-		/*
-		 * umount the child only if the child has no children
-		 * and the child is marked safe to unmount.
-		 */
-		if (!child || !IS_MNT_MARKED(child))
+	/* Verify topper is the only grandchild that has not been
+	 * speculatively unmounted.
+	 */
+	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+		if (child->mnt_mountpoint == mnt->mnt.mnt_root)
 			continue;
-		CLEAR_MNT_MARK(child);
+		if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
+			continue;
+		/* Found a mounted child */
+		goto children;
+	}
 
-		/* If there is exactly one mount covering all of child
-		 * replace child with that mount.
-		 */
-		topper = find_topper(child);
-		if (topper)
-			mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
-					      topper);
+	/* Mark mounts that can be unmounted if not locked */
+	SET_MNT_MARK(mnt);
+	progress = true;
 
-		if (list_empty(&child->mnt_mounts)) {
-			list_del_init(&child->mnt_child);
-			child->mnt.mnt_flags |= MNT_UMOUNT;
-			list_move_tail(&child->mnt_list, &mnt->mnt_list);
+	/* If a mount is without children and not locked umount it. */
+	if (!IS_MNT_LOCKED(mnt)) {
+		umount_one(mnt, to_umount);
+	} else {
+children:
+		list_move_tail(&mnt->mnt_umounting, to_restore);
+	}
+out:
+	return progress;
+}
+
+static void umount_list(struct list_head *to_umount,
+			struct list_head *to_restore)
+{
+	struct mount *mnt, *child, *tmp;
+	list_for_each_entry(mnt, to_umount, mnt_list) {
+		list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
+			/* topper? */
+			if (child->mnt_mountpoint == mnt->mnt.mnt_root)
+				list_move_tail(&child->mnt_umounting, to_restore);
+			else
+				umount_one(child, to_umount);
 		}
 	}
 }
 
+static void restore_mounts(struct list_head *to_restore)
+{
+	/* Restore mounts to a clean working state */
+	while (!list_empty(to_restore)) {
+		struct mount *mnt, *parent;
+		struct mountpoint *mp;
+
+		mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
+		CLEAR_MNT_MARK(mnt);
+		list_del_init(&mnt->mnt_umounting);
+
+		/* Should this mount be reparented? */
+		mp = mnt->mnt_mp;
+		parent = mnt->mnt_parent;
+		while (parent->mnt.mnt_flags & MNT_UMOUNT) {
+			mp = parent->mnt_mp;
+			parent = parent->mnt_parent;
+		}
+		if (parent != mnt->mnt_parent)
+			mnt_change_mountpoint(parent, mp, mnt);
+	}
+}
+
+static void cleanup_umount_visitations(struct list_head *visited)
+{
+	while (!list_empty(visited)) {
+		struct mount *mnt =
+			list_first_entry(visited, struct mount, mnt_umounting);
+		list_del_init(&mnt->mnt_umounting);
+	}
+}
+
 /*
  * collect all mounts that receive propagation from the mount in @list,
  * and return these additional mounts in the same list.
@@ -487,12 +544,69 @@
 int propagate_umount(struct list_head *list)
 {
 	struct mount *mnt;
+	LIST_HEAD(to_restore);
+	LIST_HEAD(to_umount);
+	LIST_HEAD(visited);
 
-	list_for_each_entry_reverse(mnt, list, mnt_list)
-		mark_umount_candidates(mnt);
+	/* Find candidates for unmounting */
+	list_for_each_entry_reverse(mnt, list, mnt_list) {
+		struct mount *parent = mnt->mnt_parent;
+		struct mount *m;
 
-	list_for_each_entry(mnt, list, mnt_list)
-		__propagate_umount(mnt);
+		/*
+		 * If this mount has already been visited, it is known that its
+		 * entire peer group and all of their slaves in the propagation
+		 * tree for the mountpoint have already been visited, and there
+		 * is no need to visit them again.
+		 */
+		if (!list_empty(&mnt->mnt_umounting))
+			continue;
+
+		list_add_tail(&mnt->mnt_umounting, &visited);
+		for (m = propagation_next(parent, parent); m;
+		     m = propagation_next(m, parent)) {
+			struct mount *child = __lookup_mnt(&m->mnt,
+							   mnt->mnt_mountpoint);
+			if (!child)
+				continue;
+
+			if (!list_empty(&child->mnt_umounting)) {
+				/*
+				 * If the child has already been visited, it is
+				 * known that its entire peer group and all of
+				 * their slaves in the propagation tree for the
+				 * mountpoint have already been visited, and
+				 * there is no need to visit this subtree again.
+				 */
+				m = skip_propagation_subtree(m, parent);
+				continue;
+			} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
+				/*
+				 * We have come across a partially unmounted
+				 * mount in the list that has not been visited
+				 * yet. Remember that it has been visited and
+				 * continue on our way.
+				 */
+				list_add_tail(&child->mnt_umounting, &visited);
+				continue;
+			}
+
+			/* Check the child and parents while progress is made */
+			while (__propagate_umount(child,
+						  &to_umount, &to_restore)) {
+				/* Is the parent a umount candidate? */
+				child = child->mnt_parent;
+				if (list_empty(&child->mnt_umounting))
+					break;
+			}
+		}
+	}
+
+	umount_list(&to_umount, &to_restore);
+	restore_mounts(&to_restore);
+	cleanup_umount_visitations(&visited);
+	list_splice_tail(&to_umount, list);
+
 	return 0;
 }
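
One idiom the rework above leans on is worth spelling out: a mount counts as "visited" exactly while its mnt_umounting list_head is linked into some list, and list_del_init() returns it to the unvisited state, which is why cleanup_umount_visitations() only has to walk the visited list. The self-contained userspace sketch below reproduces the idiom with a minimal circular list standing in for the kernel's <linux/list.h>.

#include <stdio.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

struct mount_like { const char *name; struct list_head umounting; };

int main(void)
{
	struct list_head visited;
	struct mount_like a = { .name = "a" }, b = { .name = "b" };

	INIT_LIST_HEAD(&visited);
	INIT_LIST_HEAD(&a.umounting);
	INIT_LIST_HEAD(&b.umounting);

	/* "visited" means: linked on some list, i.e. !list_empty() */
	list_add_tail(&a.umounting, &visited);
	printf("%s visited: %d\n", a.name, !list_empty(&a.umounting));	/* 1 */
	printf("%s visited: %d\n", b.name, !list_empty(&b.umounting));	/* 0 */

	/* the equivalent of cleanup_umount_visitations() */
	list_del_init(&a.umounting);
	printf("%s visited: %d\n", a.name, !list_empty(&a.umounting));	/* 0 */
	return 0;
}
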
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 71b62b8..c585e7e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1617,6 +1617,9 @@
 		if (vma->vm_file)
 			continue;
 
+		if (vma->vm_flags & VM_LOCKED)
+			continue;
+
 		if (!rp.nr_to_reclaim)
 			break;
 
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 3d2256a..d92a1dc 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -37,7 +37,14 @@
 	error = journal_begin(&th, inode->i_sb, jcreate_blocks);
 	reiserfs_write_unlock(inode->i_sb);
 	if (error == 0) {
+		if (type == ACL_TYPE_ACCESS && acl) {
+			error = posix_acl_update_mode(inode, &inode->i_mode,
+						      &acl);
+			if (error)
+				goto unlock;
+		}
 		error = __reiserfs_set_acl(&th, inode, type, acl);
+unlock:
 		reiserfs_write_lock(inode->i_sb);
 		error2 = journal_end(&th);
 		reiserfs_write_unlock(inode->i_sb);
@@ -241,11 +248,6 @@
 	switch (type) {
 	case ACL_TYPE_ACCESS:
 		name = XATTR_NAME_POSIX_ACL_ACCESS;
-		if (acl) {
-			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
-			if (error)
-				return error;
-		}
 		break;
 	case ACL_TYPE_DEFAULT:
 		name = XATTR_NAME_POSIX_ACL_DEFAULT;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index d48da41..683b492 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -773,13 +773,9 @@
 	 * afterwards in the other cases: we fsstack_copy_inode_size from
 	 * the lower level.
 	 */
-	if (current->mm)
-		down_write(&current->mm->mmap_sem);
 	if (ia->ia_valid & ATTR_SIZE) {
 		err = inode_newsize_ok(&tmp, ia->ia_size);
 		if (err) {
-			if (current->mm)
-				up_write(&current->mm->mmap_sem);
 			goto out;
 		}
 		truncate_setsize(inode, ia->ia_size);
@@ -802,8 +798,6 @@
 	err = notify_change2(lower_mnt, lower_dentry, &lower_ia, /* note: lower_ia */
 			NULL);
 	inode_unlock(d_inode(lower_dentry));
-	if (current->mm)
-		up_write(&current->mm->mmap_sem);
 	if (err)
 		goto out;
 
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 91bc76dc..7d764e3 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -576,7 +576,7 @@
 	/* Make sure to also account for extended attributes */
 	len += host_ui->data_len;
 
-	dent = kmalloc(len, GFP_NOFS);
+	dent = kzalloc(len, GFP_NOFS);
 	if (!dent)
 		return -ENOMEM;
 
@@ -952,7 +952,7 @@
 	if (twoparents)
 		len += plen;
 
-	dent1 = kmalloc(len, GFP_NOFS);
+	dent1 = kzalloc(len, GFP_NOFS);
 	if (!dent1)
 		return -ENOMEM;
 
@@ -1102,7 +1102,7 @@
 	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
 	if (move)
 		len += plen;
-	dent = kmalloc(len, GFP_NOFS);
+	dent = kzalloc(len, GFP_NOFS);
 	if (!dent)
 		return -ENOMEM;
 
@@ -1466,7 +1466,7 @@
 	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
 	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
 
-	xent = kmalloc(len, GFP_NOFS);
+	xent = kzalloc(len, GFP_NOFS);
 	if (!xent)
 		return -ENOMEM;
 
@@ -1573,7 +1573,7 @@
 	aligned_len1 = ALIGN(len1, 8);
 	aligned_len = aligned_len1 + ALIGN(len2, 8);
 
-	ino = kmalloc(aligned_len, GFP_NOFS);
+	ino = kzalloc(aligned_len, GFP_NOFS);
 	if (!ino)
 		return -ENOMEM;
 
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 129b18a..0359435 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1243,8 +1243,8 @@
 			return err;
 		}
 set_size:
-		truncate_setsize(inode, newsize);
 		up_write(&iinfo->i_data_sem);
+		truncate_setsize(inode, newsize);
 	} else {
 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 			down_write(&iinfo->i_data_sem);
@@ -1261,9 +1261,9 @@
 					  udf_get_block);
 		if (err)
 			return err;
+		truncate_setsize(inode, newsize);
 		down_write(&iinfo->i_data_sem);
 		udf_clear_extent_cache(inode);
-		truncate_setsize(inode, newsize);
 		udf_truncate_extents(inode);
 		up_write(&iinfo->i_data_sem);
 	}
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index b468e04..7034e17 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -170,8 +170,8 @@
 	return acl;
 }
 
-STATIC int
-__xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+int
+__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	struct xfs_inode *ip = XFS_I(inode);
 	unsigned char *ea_name;
@@ -268,5 +268,5 @@
 	}
 
  set_acl:
-	return __xfs_set_acl(inode, type, acl);
+	return __xfs_set_acl(inode, acl, type);
 }
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 286fa89..0432731 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -24,6 +24,7 @@
 #ifdef CONFIG_XFS_POSIX_ACL
 extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
 extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
 #else
 static inline struct posix_acl *xfs_get_acl(struct inode *inode, int type)
 {
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index f5e0f60..a1247c3 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -190,12 +190,12 @@
 
 #ifdef CONFIG_XFS_POSIX_ACL
 	if (default_acl) {
-		error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+		error = __xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
 		if (error)
 			goto out_cleanup_inode;
 	}
 	if (acl) {
-		error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+		error = __xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
 		if (error)
 			goto out_cleanup_inode;
 	}
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index 75ddcfa..8108c98 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -17,23 +17,25 @@
 
 /* DSI PLL clocks */
 #define VCO_CLK_0		0
-#define BITCLK_SRC_0_CLK	1
-#define BYTECLK_SRC_0_CLK	2
-#define POST_BIT_DIV_0_CLK	3
-#define POST_VCO_DIV_0_CLK	4
-#define BYTECLK_MUX_0_CLK	5
-#define PCLK_SRC_MUX_0_CLK	6
-#define PCLK_SRC_0_CLK		7
-#define PCLK_MUX_0_CLK		8
-#define VCO_CLK_1		9
-#define BITCLK_SRC_1_CLK	10
-#define BYTECLK_SRC_1_CLK	11
-#define POST_BIT_DIV_1_CLK	12
-#define POST_VCO_DIV_1_CLK	13
-#define BYTECLK_MUX_1_CLK	14
-#define PCLK_SRC_MUX_1_CLK	15
-#define PCLK_SRC_1_CLK		16
-#define PCLK_MUX_1_CLK		17
+#define PLL_OUT_DIV_0_CLK	1
+#define BITCLK_SRC_0_CLK	2
+#define BYTECLK_SRC_0_CLK	3
+#define POST_BIT_DIV_0_CLK	4
+#define POST_VCO_DIV_0_CLK	5
+#define BYTECLK_MUX_0_CLK	6
+#define PCLK_SRC_MUX_0_CLK	7
+#define PCLK_SRC_0_CLK		8
+#define PCLK_MUX_0_CLK		9
+#define VCO_CLK_1		10
+#define PLL_OUT_DIV_1_CLK	11
+#define BITCLK_SRC_1_CLK	12
+#define BYTECLK_SRC_1_CLK	13
+#define POST_BIT_DIV_1_CLK	14
+#define POST_VCO_DIV_1_CLK	15
+#define BYTECLK_MUX_1_CLK	16
+#define PCLK_SRC_MUX_1_CLK	17
+#define PCLK_SRC_1_CLK		18
+#define PCLK_MUX_1_CLK		19
 
 /* DP PLL clocks */
 #define	DP_VCO_CLK	0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 3d58645..339d470 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -211,6 +211,7 @@
 #define GCC_VS_CTRL_CLK						193
 #define GCC_VS_CTRL_CLK_SRC					194
 #define GCC_VSENSOR_CLK_SRC					195
+#define GPLL4							196
 
 /* GCC reset clocks */
 #define GCC_MMSS_BCR						0
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 0538291..10842bb 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -266,9 +266,13 @@
 #ifdef CONFIG_OF
 extern struct coresight_platform_data *of_get_coresight_platform_data(
 				struct device *dev, struct device_node *node);
+extern struct coresight_cti_data *of_get_coresight_cti_data(
+				struct device *dev, struct device_node *node);
 #else
 static inline struct coresight_platform_data *of_get_coresight_platform_data(
 	struct device *dev, struct device_node *node) { return NULL; }
+static inline struct coresight_cti_data *of_get_coresight_cti_data(
+		struct device *dev, struct device_node *node) { return NULL; }
 #endif
 
 #ifdef CONFIG_PID_NS
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 4fbc62e..b49f866 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -251,6 +251,23 @@
 		(cpu) = cpumask_next_zero((cpu), (mask)),	\
 		(cpu) < nr_cpu_ids;)
 
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start)					\
+	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
+	     (cpu) < nr_cpumask_bits;						\
+	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator
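
A brief usage sketch of the new iterator (kernel context, not a standalone program); cpu_is_suitable() is a hypothetical predicate:

/* Scan for a usable CPU, starting the search at @start and wrapping around. */
static int pick_cpu_from(int start)
{
	int cpu;

	for_each_cpu_wrap(cpu, cpu_online_mask, start) {
		if (cpu_is_suitable(cpu))	/* hypothetical predicate */
			return cpu;
	}
	return -1;	/* loop exhausted: cpu >= nr_cpu_ids, nothing found */
}
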
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a80516f..aa2b4e4 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1576,6 +1576,9 @@
 #define WLAN_AUTH_SHARED_KEY 1
 #define WLAN_AUTH_FT 2
 #define WLAN_AUTH_SAE 3
+#define WLAN_AUTH_FILS_SK 4
+#define WLAN_AUTH_FILS_SK_PFS 5
+#define WLAN_AUTH_FILS_PK 6
 #define WLAN_AUTH_LEAP 128
 
 #define WLAN_AUTH_CHALLENGE_LEN 128
@@ -1715,6 +1718,9 @@
 	WLAN_STATUS_REJECT_DSE_BAND = 96,
 	WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
 	WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+	/* 802.11ai */
+	WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
+	WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
 };
 
 
@@ -2073,6 +2079,15 @@
 #define IEEE80211_GCMP_MIC_LEN		16
 #define IEEE80211_GCMP_PN_LEN		6
 
+#define FILS_NONCE_LEN			16
+#define FILS_MAX_KEK_LEN		64
+
+#define FILS_ERP_MAX_USERNAME_LEN	16
+#define FILS_ERP_MAX_REALM_LEN		253
+#define FILS_ERP_MAX_RRK_LEN		64
+
+#define PMK_MAX_LEN			48
+
 /* Public action codes */
 enum ieee80211_pub_actioncode {
 	WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
@@ -2296,31 +2311,37 @@
 };
 
 
-/* cipher suite selectors */
-#define WLAN_CIPHER_SUITE_USE_GROUP	0x000FAC00
-#define WLAN_CIPHER_SUITE_WEP40		0x000FAC01
-#define WLAN_CIPHER_SUITE_TKIP		0x000FAC02
-/* reserved: 				0x000FAC03 */
-#define WLAN_CIPHER_SUITE_CCMP		0x000FAC04
-#define WLAN_CIPHER_SUITE_WEP104	0x000FAC05
-#define WLAN_CIPHER_SUITE_AES_CMAC	0x000FAC06
-#define WLAN_CIPHER_SUITE_GCMP		0x000FAC08
-#define WLAN_CIPHER_SUITE_GCMP_256	0x000FAC09
-#define WLAN_CIPHER_SUITE_CCMP_256	0x000FAC0A
-#define WLAN_CIPHER_SUITE_BIP_GMAC_128	0x000FAC0B
-#define WLAN_CIPHER_SUITE_BIP_GMAC_256	0x000FAC0C
-#define WLAN_CIPHER_SUITE_BIP_CMAC_256	0x000FAC0D
+#define SUITE(oui, id)	(((oui) << 8) | (id))
 
-#define WLAN_CIPHER_SUITE_SMS4		0x00147201
+/* cipher suite selectors */
+#define WLAN_CIPHER_SUITE_USE_GROUP	SUITE(0x000FAC, 0)
+#define WLAN_CIPHER_SUITE_WEP40		SUITE(0x000FAC, 1)
+#define WLAN_CIPHER_SUITE_TKIP		SUITE(0x000FAC, 2)
+/* reserved:				SUITE(0x000FAC, 3) */
+#define WLAN_CIPHER_SUITE_CCMP		SUITE(0x000FAC, 4)
+#define WLAN_CIPHER_SUITE_WEP104	SUITE(0x000FAC, 5)
+#define WLAN_CIPHER_SUITE_AES_CMAC	SUITE(0x000FAC, 6)
+#define WLAN_CIPHER_SUITE_GCMP		SUITE(0x000FAC, 8)
+#define WLAN_CIPHER_SUITE_GCMP_256	SUITE(0x000FAC, 9)
+#define WLAN_CIPHER_SUITE_CCMP_256	SUITE(0x000FAC, 10)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128	SUITE(0x000FAC, 11)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256	SUITE(0x000FAC, 12)
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256	SUITE(0x000FAC, 13)
+
+#define WLAN_CIPHER_SUITE_SMS4		SUITE(0x001472, 1)
 
 /* AKM suite selectors */
-#define WLAN_AKM_SUITE_8021X		0x000FAC01
-#define WLAN_AKM_SUITE_PSK		0x000FAC02
-#define WLAN_AKM_SUITE_8021X_SHA256	0x000FAC05
-#define WLAN_AKM_SUITE_PSK_SHA256	0x000FAC06
-#define WLAN_AKM_SUITE_TDLS		0x000FAC07
-#define WLAN_AKM_SUITE_SAE		0x000FAC08
-#define WLAN_AKM_SUITE_FT_OVER_SAE	0x000FAC09
+#define WLAN_AKM_SUITE_8021X		SUITE(0x000FAC, 1)
+#define WLAN_AKM_SUITE_PSK		SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_8021X_SHA256	SUITE(0x000FAC, 5)
+#define WLAN_AKM_SUITE_PSK_SHA256	SUITE(0x000FAC, 6)
+#define WLAN_AKM_SUITE_TDLS		SUITE(0x000FAC, 7)
+#define WLAN_AKM_SUITE_SAE		SUITE(0x000FAC, 8)
+#define WLAN_AKM_SUITE_FT_OVER_SAE	SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_FILS_SHA256	SUITE(0x000FAC, 14)
+#define WLAN_AKM_SUITE_FILS_SHA384	SUITE(0x000FAC, 15)
+#define WLAN_AKM_SUITE_FT_FILS_SHA256	SUITE(0x000FAC, 16)
+#define WLAN_AKM_SUITE_FT_FILS_SHA384	SUITE(0x000FAC, 17)
 
 #define WLAN_MAX_KEY_LEN		32
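
The SUITE() rewrite is purely mechanical: each selector is still the 24-bit OUI shifted left by eight bits with the suite type in the low byte, so the existing numeric values are unchanged. A tiny standalone check:

#include <stdio.h>

#define SUITE(oui, id)	(((oui) << 8) | (id))

int main(void)
{
	printf("CCMP        = 0x%08X\n", (unsigned)SUITE(0x000FAC, 4));		/* 0x000FAC04 */
	printf("SMS4        = 0x%08X\n", (unsigned)SUITE(0x001472, 1));		/* 0x00147201 */
	printf("FILS_SHA256 = 0x%08X\n", (unsigned)SUITE(0x000FAC, 14));	/* 0x000FAC0E */
	return 0;
}
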
 
diff --git a/include/linux/ipa_odu_bridge.h b/include/linux/ipa_odu_bridge.h
index 5d30a97..e7f75b7 100644
--- a/include/linux/ipa_odu_bridge.h
+++ b/include/linux/ipa_odu_bridge.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,7 +39,85 @@
 	u32 ipa_desc_size;
 };
 
-#if defined CONFIG_IPA || defined CONFIG_IPA3
+/**
+ * struct ipa_bridge_init_params - parameters for IPA bridge initialization API
+ *
+ * @info: structure containing the initialization information
+ * @wakeup_request: callback to the client indicating that downlink data is
+ *	available. The client is expected to call ipa_bridge_resume() to start
+ *	receiving data.
+ */
+struct ipa_bridge_init_params {
+	struct odu_bridge_params info;
+	void (*wakeup_request)(void *);
+};
+
+#ifdef CONFIG_IPA3
+
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl);
+
+int ipa_bridge_connect(u32 hdl);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth);
+
+int ipa_bridge_disconnect(u32 hdl);
+
+int ipa_bridge_suspend(u32 hdl);
+
+int ipa_bridge_resume(u32 hdl);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata);
+
+int ipa_bridge_cleanup(u32 hdl);
+
+#else
+
+static inline int ipa_bridge_init(struct ipa_bridge_init_params *params,
+	u32 *hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_connect(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_disconnect(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_suspend(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_resume(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_cleanup(u32 hdl)
+{
+	return -EPERM;
+}
+
+#endif /* CONFIG_IPA3 */
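
A schematic client-side call sequence for the new handle-based bridge API (kernel context, heavily abridged; the wakeup callback body and all error handling beyond the init call are placeholders):

static u32 bridge_hdl;

static void my_wakeup_request(void *priv)
{
	/* Downlink data pending: the client is expected to call
	 * ipa_bridge_resume(bridge_hdl) before receiving it. */
}

static int my_bridge_bring_up(struct odu_bridge_params *info)
{
	struct ipa_bridge_init_params params = {
		.info = *info,
		.wakeup_request = my_wakeup_request,
	};
	int ret;

	ret = ipa_bridge_init(&params, &bridge_hdl);
	if (ret)
		return ret;
	return ipa_bridge_connect(bridge_hdl);
}

static void my_bridge_tear_down(void)
{
	ipa_bridge_disconnect(bridge_hdl);
	ipa_bridge_cleanup(bridge_hdl);
}
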
+
+/* The API below is deprecated. Please use the handle-based API above. */
+#if defined CONFIG_IPA || defined CONFIG_IPA3
 
 int odu_bridge_init(struct odu_bridge_params *params);
 
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index 2a663c6..b01d294 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -1,6 +1,7 @@
 #ifndef _LIBFDT_ENV_H
 #define _LIBFDT_ENV_H
 
+#include <linux/kernel.h>
 #include <linux/string.h>
 
 #include <asm/byteorder.h>
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index cb0ba9f..fa7fd03 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@
 	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
 	struct list_lru_memcg	*memcg_lrus;
 #endif
+	long nr_items;
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e1a903a..6a620e0 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -788,6 +788,7 @@
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7627c76..0737cb6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -183,6 +183,7 @@
 				     unsigned int *max_nr,
 				     unsigned int *big_max_nr);
 extern unsigned int sched_get_cpu_util(int cpu);
+extern u64 sched_get_cpu_last_busy_time(int cpu);
 #else
 static inline void sched_update_nr_prod(int cpu, long delta, bool inc)
 {
@@ -196,6 +197,10 @@
 {
 	return 0;
 }
+static inline u64 sched_get_cpu_last_busy_time(int cpu)
+{
+	return 0;
+}
 #endif
 
 extern void calc_global_load(unsigned long ticks);
@@ -3882,6 +3887,7 @@
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
 #define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
 #define SCHED_CPUFREQ_WALT (1U << 4)
+#define SCHED_CPUFREQ_PL	(1U << 5)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 322bc23..3597d55 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -24,13 +24,9 @@
 extern unsigned int sysctl_sched_cstate_aware;
 extern unsigned int sysctl_sched_capacity_margin;
 extern unsigned int sysctl_sched_capacity_margin_down;
-#ifdef CONFIG_SCHED_WALT
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int sysctl_sched_use_walt_task_util;
-extern unsigned int sysctl_sched_init_task_load_pct;
-#endif
 
 #ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_init_task_load_pct;
 extern unsigned int sysctl_sched_cpu_high_irqload;
 extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
@@ -72,9 +68,6 @@
 extern int sched_migrate_notify_proc_handler(struct ctl_table *table,
 		int write, void __user *buffer, size_t *lenp, loff_t *ppos);
 
-extern int sched_hmp_proc_update_handler(struct ctl_table *table,
-		int write, void __user *buffer, size_t *lenp, loff_t *ppos);
-
 extern int sched_boost_handler(struct ctl_table *table, int write,
 			void __user *buffer, size_t *lenp, loff_t *ppos);
 
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 1f39661..0f9fff3 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -333,6 +333,8 @@
 	struct usb_ssp_cap_descriptor	*ssp_cap;
 	struct usb_ss_container_id_descriptor	*ss_id;
 	struct usb_ptm_cap_descriptor	*ptm_cap;
+	struct usb_config_summary_descriptor	*config_summary;
+	unsigned int	num_config_summary_desc;
 };
 
 int __usb_get_extra_descriptor(char *buffer, unsigned size,
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index f2322f3..b6cc17b 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -50,7 +50,8 @@
 #define CLUSTER_ID_MONO		0x0001
 #define CLUSTER_ID_STEREO	0x0002
 
-#define FULL_ADC_PROFILE	0x01
+/* A.2 audio function subclass codes */
+#define FULL_ADC_3_0		0x01
 
 /* BADD Profile IDs */
 #define PROF_GENERIC_IO		0x20
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 0ecae0b..ed46675 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -88,6 +88,8 @@
  */
 extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
 extern void vfio_group_put_external_user(struct vfio_group *group);
+extern bool vfio_external_group_match_file(struct vfio_group *group,
+					   struct file *filep);
 extern int vfio_external_user_iommu_id(struct vfio_group *group);
 extern long vfio_external_check_extension(struct vfio_group *group,
 					  unsigned long arg);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 826eef8..f113e0e 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -84,6 +84,7 @@
 			const void *caller);
 
 extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
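
vfree_atomic() is meant for callers that cannot sleep; a minimal, hedged kernel-context sketch of the intended usage, with a hypothetical my_obj structure:

struct my_obj {
	spinlock_t lock;
	void *buf;	/* allocated with vmalloc() */
};

static void my_obj_drop_buf(struct my_obj *obj)
{
	spin_lock(&obj->lock);
	/* vfree() may sleep and must not be called here; vfree_atomic()
	 * defers the actual unmapping, so it is safe under the spinlock. */
	vfree_atomic(obj->buf);
	obj->buf = NULL;
	spin_unlock(&obj->lock);
}
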
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 45dbffb..b99b80a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -24,6 +24,27 @@
 #include <linux/net.h>
 #include <net/regulatory.h>
 
+/* backport support for new cfg80211 ops "update_connect_params" */
+#define CFG80211_UPDATE_CONNECT_PARAMS 1
+
+/**
+ *  backport support for NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA
+ *  and NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED.
+ */
+#define CFG80211_RAND_TA_FOR_PUBLIC_ACTION_FRAME 1
+
+/* backport support for NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI */
+#define CFG80211_REPORT_BETTER_BSS_IN_SCHED_SCAN 1
+
+/* backport support for specifying reason for connect timeout */
+#define CFG80211_CONNECT_TIMEOUT_REASON_CODE 1
+
+/* Indicate backport support for the new connect done api */
+#define CFG80211_CONNECT_DONE 1
+
+/* Indicate backport support for FILS SK offload in cfg80211 */
+#define CFG80211_FILS_SK_OFFLOAD_SUPPORT 1
+
 /**
  * DOC: Introduction
  *
@@ -784,19 +805,15 @@
  * @iftype_num: array with the number of interfaces of each interface
  *	type.  The index is the interface type as specified in &enum
  *	nl80211_iftype.
- * @beacon_int_gcd: a value specifying GCD of all beaconing interfaces,
- *	the GCD of a single value is considered the value itself, so for
- *	a single interface this should be set to that interface's beacon
- *	interval
- * @beacon_int_different: a flag indicating whether or not all beacon
- *	intervals (of beaconing interfaces) are different or not.
+ * @new_beacon_int: set this to the beacon interval of a new interface
+ *	that's not operating yet, if such is to be checked as part of
+ *	the verification
  */
 struct iface_combination_params {
 	int num_different_channels;
 	u8 radar_detect;
 	int iftype_num[NUM_NL80211_IFTYPES];
-	u32 beacon_int_gcd;
-	bool beacon_int_different;
+	u32 new_beacon_int;
 };
 
 /**
@@ -1596,6 +1613,17 @@
 };
 
 /**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+	enum nl80211_band band;
+	s8 delta;
+};
+
+/**
  * struct cfg80211_sched_scan_request - scheduled scan request description
  *
  * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
@@ -1630,6 +1658,16 @@
  *	cycle.  The driver may ignore this parameter and start
  *	immediately (or at any other time), if this feature is not
  *	supported.
+ * @relative_rssi_set: Indicates whether @relative_rssi is set or not.
+ * @relative_rssi: Relative RSSI threshold in dB to restrict scan result
+ *	reporting in connected state to cases where a matching BSS is determined
+ *	to have better or slightly worse RSSI than the current connected BSS.
+ *	The relative RSSI threshold values are ignored in disconnected state.
+ * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong
+ *	to the specified band while deciding whether a better BSS is reported
+ *	using @relative_rssi. If delta is a negative number, the BSSs that
+ *	belong to the specified band will be penalized by delta dB in relative
+ *	comparisons.
  */
 struct cfg80211_sched_scan_request {
 	struct cfg80211_ssid *ssids;
@@ -1649,6 +1687,10 @@
 	u8 mac_addr[ETH_ALEN] __aligned(2);
 	u8 mac_addr_mask[ETH_ALEN] __aligned(2);
 
+	bool relative_rssi_set;
+	s8 relative_rssi;
+	struct cfg80211_bss_select_adjust rssi_adjust;
+
 	/* internal */
 	struct wiphy *wiphy;
 	struct net_device *dev;
@@ -1789,9 +1831,11 @@
  * @key_len: length of WEP key for shared key authentication
  * @key_idx: index of WEP key for shared key authentication
  * @key: WEP key for shared key authentication
- * @sae_data: Non-IE data to use with SAE or %NULL. This starts with
- *	Authentication transaction sequence number field.
- * @sae_data_len: Length of sae_data buffer in octets
+ * @auth_data: Fields and elements in Authentication frames. This contains
+ *	the authentication frame body (non-IE and IE data), excluding the
+ *	Authentication algorithm number, i.e., starting at the Authentication
+ *	transaction sequence number field.
+ * @auth_data_len: Length of auth_data buffer in octets
  */
 struct cfg80211_auth_request {
 	struct cfg80211_bss *bss;
@@ -1800,8 +1844,8 @@
 	enum nl80211_auth_type auth_type;
 	const u8 *key;
 	u8 key_len, key_idx;
-	const u8 *sae_data;
-	size_t sae_data_len;
+	const u8 *auth_data;
+	size_t auth_data_len;
 };
 
 /**
@@ -1842,6 +1886,12 @@
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
  * @vht_capa: VHT capability override
  * @vht_capa_mask: VHT capability mask indicating which fields to use
+ * @fils_kek: FILS KEK for protecting (Re)Association Request/Response frame or
+ *	%NULL if FILS is not used.
+ * @fils_kek_len: Length of fils_kek in octets
+ * @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
+ *	Request/Response frame or %NULL if FILS is not used. This field starts
+ *	with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
  */
 struct cfg80211_assoc_request {
 	struct cfg80211_bss *bss;
@@ -1853,6 +1903,9 @@
 	struct ieee80211_ht_cap ht_capa;
 	struct ieee80211_ht_cap ht_capa_mask;
 	struct ieee80211_vht_cap vht_capa, vht_capa_mask;
+	const u8 *fils_kek;
+	size_t fils_kek_len;
+	const u8 *fils_nonces;
 };
 
 /**
@@ -1946,17 +1999,6 @@
 };
 
 /**
- * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
- *
- * @band: band of BSS which should match for RSSI level adjustment.
- * @delta: value of RSSI level adjustment.
- */
-struct cfg80211_bss_select_adjust {
-	enum nl80211_band band;
-	s8 delta;
-};
-
-/**
  * struct cfg80211_bss_selection - connection parameters for BSS selection.
  *
  * @behaviour: requested BSS selection behaviour.
@@ -2016,6 +2058,19 @@
  *	the BSSID of the current association, i.e., to the value that is
  *	included in the Current AP address field of the Reassociation Request
  *	frame.
+ * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the
+ *	NAI or %NULL if not specified. This is used to construct FILS wrapped
+ *	data IE.
+ * @fils_erp_username_len: Length of @fils_erp_username in octets.
+ * @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or
+ *	%NULL if not specified. This specifies the domain name of ER server and
+ *	is used to construct FILS wrapped data IE.
+ * @fils_erp_realm_len: Length of @fils_erp_realm in octets.
+ * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP
+ *	messages. This is also used to construct FILS wrapped data IE.
+ * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
+ *	keys in FILS or %NULL if not specified.
+ * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
  */
 struct cfg80211_connect_params {
 	struct ieee80211_channel *channel;
@@ -2041,6 +2096,25 @@
 	bool pbss;
 	struct cfg80211_bss_selection bss_select;
 	const u8 *prev_bssid;
+	const u8 *fils_erp_username;
+	size_t fils_erp_username_len;
+	const u8 *fils_erp_realm;
+	size_t fils_erp_realm_len;
+	u16 fils_erp_next_seq_num;
+	const u8 *fils_erp_rrk;
+	size_t fils_erp_rrk_len;
+};
+
+/**
+ * enum cfg80211_connect_params_changed - Connection parameters being updated
+ *
+ * This enum provides information of all connect parameters that
+ * have to be updated as part of update_connect_params() call.
+ *
+ * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated
+ */
+enum cfg80211_connect_params_changed {
+	UPDATE_ASSOC_IES		= BIT(0),
 };
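As a rough sketch of how the new hook is consumed (drv_ prefixed names are hypothetical), a driver's update_connect_params() implementation would typically act only on the bits set in the changed mask:

static int drv_update_connect_params(struct wiphy *wiphy,
				     struct net_device *dev,
				     struct cfg80211_connect_params *sme,
				     u32 changed)
{
	if (changed & UPDATE_ASSOC_IES) {
		/* Push sme->ie / sme->ie_len to firmware so the new IEs are
		 * used for subsequent (re)associations; no disconnect or
		 * immediate reassociation is triggered here.
		 */
	}

	return 0;
}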
 
 /**
@@ -2067,12 +2141,27 @@
  * This structure is passed to the set/del_pmksa() method for PMKSA
  * caching.
  *
- * @bssid: The AP's BSSID.
- * @pmkid: The PMK material itself.
+ * @bssid: The AP's BSSID (may be %NULL).
+ * @pmkid: The identifier to refer a PMKSA.
+ * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
+ *	derivation by a FILS STA. Otherwise, %NULL.
+ * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
+ *	the hash algorithm used to generate this.
+ * @ssid: SSID to specify the ESS within which a PMKSA is valid when using a
+ *	FILS cache identifier (may be %NULL).
+ * @ssid_len: Length of the @ssid in octets.
+ * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
+ *	scope of the PMKSA. This is valid only if @ssid_len is non-zero (may be
+ *	%NULL).
  */
 struct cfg80211_pmksa {
 	const u8 *bssid;
 	const u8 *pmkid;
+	const u8 *pmk;
+	size_t pmk_len;
+	const u8 *ssid;
+	size_t ssid_len;
+	const u8 *cache_id;
 };
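A hedged sketch of how a driver's set_pmksa() might distinguish the two ways a PMKSA can now be keyed (BSSID vs. FILS cache identifier); the drv_add_pmksa_* helpers are hypothetical:

static int drv_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
			 struct cfg80211_pmksa *pmksa)
{
	if (pmksa->bssid)
		return drv_add_pmksa_bssid(dev, pmksa->bssid, pmksa->pmkid,
					   pmksa->pmk, pmksa->pmk_len);

	/* FILS: PMKSA scoped by SSID + 2-octet cache identifier */
	if (pmksa->ssid_len && pmksa->cache_id)
		return drv_add_pmksa_fils(dev, pmksa->ssid, pmksa->ssid_len,
					  pmksa->cache_id, pmksa->pmkid,
					  pmksa->pmk, pmksa->pmk_len);

	return -EINVAL;
}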
 
 /**
@@ -2564,10 +2653,17 @@
  *	cases, the result of roaming is indicated with a call to
  *	cfg80211_roamed() or cfg80211_roamed_bss().
  *	(invoked with the wireless_dev mutex held)
+ * @update_connect_params: Update the connect parameters while connected to a
+ *	BSS. The updated parameters can be used by driver/firmware for
+ *	subsequent BSS selection (roaming) decisions and to form the
+ *	Authentication/(Re)Association Request frames. This call does not
+ *	request an immediate disassociation or reassociation with the current
+ *	BSS, i.e., this impacts only subsequent (re)associations. The bits in
+ *	changed are defined in &enum cfg80211_connect_params_changed.
+ *	(invoked with the wireless_dev mutex held)
  * @disconnect: Disconnect from the BSS/ESS. Once done, call
  *	cfg80211_disconnected().
  *	(invoked with the wireless_dev mutex held)
- *
  * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
  *	cfg80211_ibss_joined(), also call that function when changing BSSID due
  *	to a merge.
@@ -2734,6 +2830,8 @@
  * @nan_change_conf: changes NAN configuration. The changed parameters must
  *	be specified in @changes (using &enum cfg80211_nan_conf_changes);
  *	All other parameters must be ignored.
+ *
+ * @set_multicast_to_unicast: configure multicast to unicast conversion for BSS
  */
 struct cfg80211_ops {
 	int	(*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2848,6 +2946,10 @@
 
 	int	(*connect)(struct wiphy *wiphy, struct net_device *dev,
 			   struct cfg80211_connect_params *sme);
+	int	(*update_connect_params)(struct wiphy *wiphy,
+					 struct net_device *dev,
+					 struct cfg80211_connect_params *sme,
+					 u32 changed);
 	int	(*disconnect)(struct wiphy *wiphy, struct net_device *dev,
 			      u16 reason_code);
 
@@ -3010,6 +3112,10 @@
 				   struct wireless_dev *wdev,
 				   struct cfg80211_nan_conf *conf,
 				   u32 changes);
+
+	int	(*set_multicast_to_unicast)(struct wiphy *wiphy,
+					    struct net_device *dev,
+					    const bool enabled);
 };
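A minimal sketch of the new set_multicast_to_unicast op, assuming a hypothetical per-interface private struct (drv_vif) that carries the toggle consumed on the TX path:

static int drv_set_multicast_to_unicast(struct wiphy *wiphy,
					struct net_device *dev,
					const bool enabled)
{
	struct drv_vif *vif = netdev_priv(dev);	/* hypothetical priv data */

	vif->multicast_to_unicast = enabled;	/* checked when forwarding */
	return 0;
}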
 
 /*
@@ -4999,6 +5105,78 @@
 #endif
 
 /**
+ * struct cfg80211_connect_resp_params - Connection response params
+ * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ *	%WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ *	the real status code for failures. If this call is used to report a
+ *	failure due to a timeout (e.g., not receiving an Authentication frame
+ *	from the AP) instead of an explicit rejection by the AP, -1 is used to
+ *	indicate that this is a failure, but without a status code.
+ *	@timeout_reason is used to report the reason for the timeout in that
+ *	case.
+ * @bssid: The BSSID of the AP (may be %NULL)
+ * @bss: Entry of the bss to which the STA got connected; can be obtained through
+ *	cfg80211_get_bss() (may be %NULL). Only one parameter among @bssid and
+ *	@bss needs to be specified.
+ * @req_ie: Association request IEs (may be %NULL)
+ * @req_ie_len: Association request IEs length
+ * @resp_ie: Association response IEs (may be %NULL)
+ * @resp_ie_len: Association response IEs length
+ * @fils_kek: KEK derived from a successful FILS connection (may be %NULL)
+ * @fils_kek_len: Length of @fils_kek in octets
+ * @update_erp_next_seq_num: Boolean value to specify whether the value in
+ *	@fils_erp_next_seq_num is valid.
+ * @fils_erp_next_seq_num: The next sequence number to use in ERP message in
+ *	FILS Authentication. This value should be specified irrespective of the
+ *	status for a FILS connection.
+ * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
+ * @pmk_len: Length of @pmk in octets
+ * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
+ *	used for this FILS connection (may be %NULL).
+ * @timeout_reason: Reason for connection timeout. This is used when the
+ *	connection fails due to a timeout instead of an explicit rejection from
+ *	the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ *	not known. This value is used only if @status < 0 to indicate that the
+ *	failure is due to a timeout and not due to explicit rejection by the AP.
+ *	This value is ignored in other cases (@status >= 0).
+ */
+struct cfg80211_connect_resp_params {
+	int status;
+	const u8 *bssid;
+	struct cfg80211_bss *bss;
+	const u8 *req_ie;
+	size_t req_ie_len;
+	const u8 *resp_ie;
+	size_t resp_ie_len;
+	const u8 *fils_kek;
+	size_t fils_kek_len;
+	bool update_erp_next_seq_num;
+	u16 fils_erp_next_seq_num;
+	const u8 *pmk;
+	size_t pmk_len;
+	const u8 *pmkid;
+	enum nl80211_timeout_reason timeout_reason;
+};
+
+/**
+ * cfg80211_connect_done - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @params: connection response parameters
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss(), but takes a structure pointer for connection response
+ * parameters. Only one of the functions among cfg80211_connect_bss(),
+ * cfg80211_connect_result(), cfg80211_connect_timeout(),
+ * and cfg80211_connect_done() should be called.
+ */
+void cfg80211_connect_done(struct net_device *dev,
+			   struct cfg80211_connect_resp_params *params,
+			   gfp_t gfp);
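For illustration only, a sketch of a driver reporting a successful FILS connection through cfg80211_connect_done(); the reporting function and all buffers are assumed to come from a driver-specific firmware event:

static void drv_report_fils_connect(struct net_device *dev, const u8 *bssid,
				    const u8 *req_ie, size_t req_ie_len,
				    const u8 *resp_ie, size_t resp_ie_len,
				    const u8 *kek, size_t kek_len,
				    const u8 *pmk, size_t pmk_len,
				    const u8 *pmkid, u16 erp_seq)
{
	struct cfg80211_connect_resp_params params = {
		.status = WLAN_STATUS_SUCCESS,
		.bssid = bssid,
		.req_ie = req_ie,
		.req_ie_len = req_ie_len,
		.resp_ie = resp_ie,
		.resp_ie_len = resp_ie_len,
		.fils_kek = kek,
		.fils_kek_len = kek_len,
		.update_erp_next_seq_num = true,
		.fils_erp_next_seq_num = erp_seq,
		.pmk = pmk,
		.pmk_len = pmk_len,
		.pmkid = pmkid,
	};

	cfg80211_connect_done(dev, &params, GFP_KERNEL);
}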
+
+/**
  * cfg80211_connect_bss - notify cfg80211 of connection result
  *
  * @dev: network device
@@ -5009,20 +5187,50 @@
  * @req_ie_len: association request IEs length
  * @resp_ie: association response IEs (may be %NULL)
  * @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
- *      %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
- *      the real status code for failures.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ *	%WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ *	the real status code for failures. If this call is used to report a
+ *	failure due to a timeout (e.g., not receiving an Authentication frame
+ *	from the AP) instead of an explicit rejection by the AP, -1 is used to
+ *	indicate that this is a failure, but without a status code.
+ *	@timeout_reason is used to report the reason for the timeout in that
+ *	case.
  * @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout. This is used when the
+ *	connection fails due to a timeout instead of an explicit rejection from
+ *	the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ *	not known. This value is used only if @status < 0 to indicate that the
+ *	failure is due to a timeout and not due to explicit rejection by the AP.
+ *	This value is ignored in other cases (@status >= 0).
  *
- * It should be called by the underlying driver whenever connect() has
- * succeeded. This is similar to cfg80211_connect_result(), but with the
- * option of identifying the exact bss entry for the connection. Only one of
- * these functions should be called.
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_result(), but with the option of identifying the exact bss
+ * entry for the connection. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
  */
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
-			  struct cfg80211_bss *bss, const u8 *req_ie,
-			  size_t req_ie_len, const u8 *resp_ie,
-			  size_t resp_ie_len, int status, gfp_t gfp);
+static inline void
+cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+		     struct cfg80211_bss *bss, const u8 *req_ie,
+		     size_t req_ie_len, const u8 *resp_ie,
+		     size_t resp_ie_len, int status, gfp_t gfp,
+		     enum nl80211_timeout_reason timeout_reason)
+{
+	struct cfg80211_connect_resp_params params;
+
+	memset(&params, 0, sizeof(params));
+	params.status = status;
+	params.bssid = bssid;
+	params.bss = bss;
+	params.req_ie = req_ie;
+	params.req_ie_len = req_ie_len;
+	params.resp_ie = resp_ie;
+	params.resp_ie_len = resp_ie_len;
+	params.timeout_reason = timeout_reason;
+
+	cfg80211_connect_done(dev, &params, gfp);
+}
 
 /**
  * cfg80211_connect_result - notify cfg80211 of connection result
@@ -5033,13 +5241,16 @@
  * @req_ie_len: association request IEs length
  * @resp_ie: association response IEs (may be %NULL)
  * @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
  *	%WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
  *	the real status code for failures.
  * @gfp: allocation flags
  *
- * It should be called by the underlying driver whenever connect() has
- * succeeded.
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
+ * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
  */
 static inline void
 cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -5048,7 +5259,8 @@
 			u16 status, gfp_t gfp)
 {
 	cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
-			     resp_ie_len, status, gfp);
+			     resp_ie_len, status, gfp,
+			     NL80211_TIMEOUT_UNSPECIFIED);
 }
 
 /**
@@ -5059,19 +5271,23 @@
 * @req_ie: association request IEs (may be %NULL)
  * @req_ie_len: association request IEs length
  * @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout.
  *
  * It should be called by the underlying driver whenever connect() has failed
  * in a sequence where no explicit authentication/association rejection was
  * received from the AP. This could happen, e.g., due to not being able to send
  * out the Authentication or Association Request frame or timing out while
- * waiting for the response.
+ * waiting for the response. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
  */
 static inline void
 cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
-			 const u8 *req_ie, size_t req_ie_len, gfp_t gfp)
+			 const u8 *req_ie, size_t req_ie_len, gfp_t gfp,
+			 enum nl80211_timeout_reason timeout_reason)
 {
 	cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1,
-			     gfp);
+			     gfp, timeout_reason);
 }
 
 /**
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f5e625f..4341731 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -22,6 +22,7 @@
 #include <net/flow.h>
 #include <net/ip6_fib.h>
 #include <net/sock.h>
+#include <net/lwtunnel.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/route.h>
@@ -233,4 +234,11 @@
 		return daddr;
 }
 
+static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+{
+	return a->dst.dev == b->dst.dev &&
+	       a->rt6i_idev == b->rt6i_idev &&
+	       ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
+	       !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+}
 #endif
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 308adc4..9fce47e 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -221,9 +221,17 @@
 	bool			no_share;
 };
 
+struct vxlan_dev_node {
+	struct hlist_node hlist;
+	struct vxlan_dev *vxlan;
+};
+
 /* Pseudo network device */
 struct vxlan_dev {
-	struct hlist_node hlist;	/* vni hash table */
+	struct vxlan_dev_node hlist4;	/* vni hash table for IPv4 socket */
+#if IS_ENABLED(CONFIG_IPV6)
+	struct vxlan_dev_node hlist6;	/* vni hash table for IPv6 socket */
+#endif
 	struct list_head  next;		/* vxlan's per namespace list */
 	struct vxlan_sock __rcu *vn4_sock;	/* listening socket for IPv4 */
 #if IS_ENABLED(CONFIG_IPV6)
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 9d249f6..69fa6b3 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -254,6 +254,7 @@
 	STARGET_CREATED = 1,
 	STARGET_RUNNING,
 	STARGET_REMOVE,
+	STARGET_CREATED_REMOVE,
 	STARGET_DEL,
 };
 
diff --git a/include/soc/qcom/glink.h b/include/soc/qcom/glink.h
index 7b86481..4522b11 100644
--- a/include/soc/qcom/glink.h
+++ b/include/soc/qcom/glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -340,6 +340,22 @@
  */
 unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size);
 
+/**
+ * glink_start_rx_rt() - Vote for RT thread priority on RX.
+ * @handle:	Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_start_rx_rt(void *handle);
+
+/**
+ * glink_end_rx_rt() - Remove the RT thread priority vote on RX.
+ * @handle:	Channel handle for which transactions are occurring.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_end_rx_rt(void *handle);
+
 #else /* CONFIG_MSM_GLINK */
 static inline void *glink_open(const struct glink_open_config *cfg_ptr)
 {
@@ -428,5 +444,16 @@
 {
 	return 0;
 }
+
+static inline int glink_start_rx_rt(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline int glink_end_rx_rt(void *handle)
+{
+	return -ENODEV;
+}
+
 #endif /* CONFIG_MSM_GLINK */
 #endif /* _SOC_QCOM_GLINK_H_ */
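Hypothetical client usage of the new vote/unvote pair, bracketing a latency-sensitive RX burst (client_drain_rx() and the packet handling are placeholders):

static void client_drain_rx(void *handle)
{
	int voted = !glink_start_rx_rt(handle);

	/* ... read and process queued RX packets ... */

	if (voted)
		glink_end_rx_rt(handle);	/* drop the RT priority vote */
}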
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 665708d..12fa374 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -50,8 +50,6 @@
 #define PERM_EXEC			0x1
 
 #ifdef CONFIG_QCOM_SECURE_BUFFER
-int msm_secure_table(struct sg_table *table);
-int msm_unsecure_table(struct sg_table *table);
 int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -59,17 +57,8 @@
 extern int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems);
-bool msm_secure_v2_is_supported(void);
 const char *msm_secure_vmid_to_string(int secure_vmid);
 #else
-static inline int msm_secure_table(struct sg_table *table)
-{
-	return -EINVAL;
-}
-static inline int msm_unsecure_table(struct sg_table *table)
-{
-	return -EINVAL;
-}
 static inline int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -85,10 +74,6 @@
 	return -EINVAL;
 }
 
-static inline bool msm_secure_v2_is_supported(void)
-{
-	return false;
-}
 static inline const char *msm_secure_vmid_to_string(int secure_vmid)
 {
 	return "N/A";
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index dc404e4..f196d40 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -233,6 +233,7 @@
 uint32_t socinfo_get_version(void);
 uint32_t socinfo_get_raw_id(void);
 char *socinfo_get_build_id(void);
+char *socinfo_get_id_string(void);
 uint32_t socinfo_get_platform_type(void);
 uint32_t socinfo_get_platform_subtype(void);
 uint32_t socinfo_get_platform_version(void);
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4ac24f5..33b2e75 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -64,6 +64,14 @@
 #define TA_DEFAULT_FABRIC_PROT_TYPE	0
 /* TPG status needs to be enabled to return sendtargets discovery endpoint info */
 #define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
+/*
+ * Used to control the sending of keys with the optional-to-respond state bit
+ * set, as a workaround for non-RFC-compliant initiators that neither propose
+ * nor respond to specific keys required for login to complete.
+ *
+ * See iscsi_check_proposer_for_optional_reply() for more details.
+ */
+#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1
 
 #define ISCSI_IOV_DATA_BUFFER		5
 
@@ -766,6 +774,7 @@
 	u8			t10_pi;
 	u32			fabric_prot_type;
 	u32			tpg_enabled_sendtargets;
+	u32			login_keys_workaround;
 	struct iscsi_portal_group *tpg;
 };
 
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index bbcb3d5..cf3f5e3 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -71,10 +71,8 @@
 		__field(unsigned long,	cpu_load		)
 		__field(unsigned int,	rt_nr_running		)
 		__field(unsigned int,	cpus_allowed		)
-#ifdef CONFIG_SCHED_WALT
 		__field(unsigned int,	demand			)
 		__field(unsigned int,	pred_demand		)
-#endif
 	),
 
 	TP_fast_assign(
@@ -87,24 +85,17 @@
 		__entry->cpu_load	= task_rq(p)->cpu_load[0];
 		__entry->rt_nr_running	= task_rq(p)->rt.rt_nr_running;
 		__entry->cpus_allowed	= cpus_allowed;
-#ifdef CONFIG_SCHED_WALT
-		__entry->demand		= p->ravg.demand;
-		__entry->pred_demand	= p->ravg.pred_demand;
-#endif
+		__entry->demand		= task_load(p);
+		__entry->pred_demand	= task_pl(p);
 	),
 
-	TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
-#ifdef CONFIG_SCHED_WALT
-			" demand=%u pred_demand=%u"
-#endif
-			, __entry->cpu,
+	TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x demand=%u pred_demand=%u",
+			__entry->cpu,
 			__entry->enqueue ? "enqueue" : "dequeue",
 			__entry->comm, __entry->pid,
 			__entry->prio, __entry->nr_running,
 			__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
-#ifdef CONFIG_SCHED_WALT
 			, __entry->demand, __entry->pred_demand
-#endif
 			)
 );
 
@@ -254,7 +245,7 @@
 		__entry->pred_demand     = p->ravg.pred_demand;
 		memcpy(__entry->hist, p->ravg.sum_history,
 					RAVG_HIST_SIZE_MAX * sizeof(u32));
-		__entry->nr_big_tasks   = rq->hmp_stats.nr_big_tasks;
+		__entry->nr_big_tasks   = rq->walt_stats.nr_big_tasks;
 		__entry->cpu            = rq->cpu;
 	),
 
@@ -572,10 +563,10 @@
 		__entry->cpu			= rq->cpu;
 		__entry->idle			= idle;
 		__entry->nr_running		= rq->nr_running;
-		__entry->nr_big_tasks		= rq->hmp_stats.nr_big_tasks;
+		__entry->nr_big_tasks		= rq->walt_stats.nr_big_tasks;
 		__entry->load_scale_factor	= cpu_load_scale_factor(rq->cpu);
 		__entry->capacity		= cpu_capacity(rq->cpu);
-		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
+		__entry->cumulative_runnable_avg = rq->walt_stats.cumulative_runnable_avg;
 		__entry->irqload		= irqload;
 		__entry->max_freq		= cpu_max_freq(rq->cpu);
 		__entry->power_cost		= power_cost;
@@ -627,7 +618,7 @@
 		__entry->grp_rq_ps	= rq->grp_time.prev_runnable_sum;
 		__entry->nt_ps		= rq->nt_prev_runnable_sum;
 		__entry->grp_nt_ps	= rq->grp_time.nt_prev_runnable_sum;
-		__entry->pl		= rq->hmp_stats.pred_demands_sum;
+		__entry->pl		= rq->walt_stats.pred_demands_sum;
 		__entry->load		= load;
 	),
 
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 7668b57..5539933 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -37,9 +37,56 @@
 	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
 };
 
-enum {
+/**
+ * enum flat_binder_object_shifts: shift values for flat_binder_object_flags
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
+ *
+ */
+enum flat_binder_object_shifts {
+	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
+};
+
+/**
+ * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
+ */
+enum flat_binder_object_flags {
+	/**
+	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
+	 *
+	 * These bits can be used to set the minimum scheduler priority
+	 * at which transactions into this node should run. Valid values
+	 * in these bits depend on the scheduler policy encoded in
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
+	 *
+	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
+	 * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
+	 */
 	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+	/**
+	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
+	 */
 	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+	/**
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
+	 *
+	 * These two bits can be used to set the min scheduling policy at which
+	 * transactions on this node should run. These match the UAPI
+	 * scheduler policy values, e.g.:
+	 * 00b: SCHED_NORMAL
+	 * 01b: SCHED_FIFO
+	 * 10b: SCHED_RR
+	 * 11b: SCHED_BATCH
+	 */
+	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
+		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
+
+	/**
+	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
+	 *
+	 * Only when set, calls into this node will inherit a real-time
+	 * scheduling policy from the caller (for synchronous transactions).
+	 */
+	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
 };
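Illustrative flag composition for a node that accepts fds, runs incoming transactions at SCHED_FIFO priority 10, and lets synchronous callers pass their RT policy through; the concrete values are chosen for the example only:

__u32 flags = FLAT_BINDER_FLAG_ACCEPTS_FDS |
	      (1U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) | /* 01b: SCHED_FIFO */
	      (10 & FLAT_BINDER_FLAG_PRIORITY_MASK) |       /* FIFO prio 1..99 */
	      FLAT_BINDER_FLAG_INHERIT_RT;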
 
 #ifdef BINDER_IPC_32BIT
@@ -186,6 +233,19 @@
 #define BINDER_CURRENT_PROTOCOL_VERSION 8
 #endif
 
+/*
+ * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
+ * Set ptr to NULL for the first call to get the info for the first node, and
+ * then repeat the call passing the previously returned value to get the next
+ * nodes.  ptr will be 0 when there are no more nodes.
+ */
+struct binder_node_debug_info {
+	binder_uintptr_t ptr;
+	binder_uintptr_t cookie;
+	__u32            has_strong_ref;
+	__u32            has_weak_ref;
+};
+
 #define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
 #define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
 #define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
@@ -193,6 +253,7 @@
 #define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
 #define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
 #define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
+#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
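A userspace sketch of the iteration protocol described above (needs <sys/ioctl.h>, <stdio.h>, <string.h> and this header; fd is assumed to be an open binder descriptor):

static void dump_nodes(int fd)
{
	struct binder_node_debug_info info;

	memset(&info, 0, sizeof(info));	/* ptr = 0: start at the first node */
	for (;;) {
		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
			break;
		if (!info.ptr)		/* 0 means no more nodes */
			break;
		printf("node %llx strong=%u weak=%u\n",
		       (unsigned long long)info.ptr,
		       info.has_strong_ref, info.has_weak_ref);
	}
}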
 
 /*
  * NOTE: Two special error codes you should check for when calling
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 0932378..e645f17 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -266,6 +266,7 @@
 #define FUSE_PARALLEL_DIROPS    (1 << 18)
 #define FUSE_HANDLE_KILLPRIV	(1 << 19)
 #define FUSE_POSIX_ACL		(1 << 20)
+#define FUSE_PASSTHROUGH	(1 << 21)
 
 /**
  * CUSE INIT request/reply flags
@@ -498,7 +499,7 @@
 struct fuse_open_out {
 	uint64_t	fh;
 	uint32_t	open_flags;
-	uint32_t	padding;
+	int32_t   passthrough_fd;
 };
 
 struct fuse_release_in {
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 7d1e3b2..8c0fc7b 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -173,6 +173,42 @@
  */
 
 /**
+ * DOC: FILS shared key authentication offload
+ *
+ * FILS shared key authentication offload can be advertised by drivers by
+ * setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support
+ * FILS shared key authentication offload should be able to construct the
+ * authentication and association frames for FILS shared key authentication and
+ * eventually do a key derivation as per IEEE 802.11ai. The below additional
+ * parameters should be given to driver in %NL80211_CMD_CONNECT.
+ *	%NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
+ *	%NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
+ *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct erp message
+ *	%NL80211_ATTR_FILS_ERP_RRK - used to generate the rIK and rMSK
+ * rIK should be used to generate an authentication tag on the ERP message and
+ * rMSK should be used to derive a PMKSA.
+ * rIK, rMSK should be generated and keyname_nai, sequence number should be used
+ * as specified in IETF RFC 6696.
+ *
+ * When FILS shared key authentication is completed, driver needs to provide the
+ * below additional parameters to userspace.
+ *	%NL80211_ATTR_FILS_KEK - used for key renewal
+ *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
+ *	%NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
+ *	%NL80211_ATTR_PMK - used to update the PMKSA cache in userspace
+ * The PMKSA can be maintained in userspace persistently so that it can be used
+ * later, e.g., across reboots or Wi-Fi off/on cycles.
+ *
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a FILS
+ * capable AP supporting PMK caching. It specifies the scope within which the
+ * PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
+ * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
+ * on FILS cache identifier. Additionally %NL80211_ATTR_PMK is used with
+ * %NL80211_SET_PMKSA to specify the PMK corresponding to a PMKSA for driver to
+ * use in a FILS shared key connection with PMKSA caching.
+ */
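On the driver side, the offload capability is advertised as an extended feature before wiphy registration; a one-line sketch (wiphy setup code omitted):

	/* during wiphy setup, before wiphy_register() */
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD);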
+
+/**
  * enum nl80211_commands - supported nl80211 commands
  *
  * @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -370,10 +406,18 @@
  * @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
  *	NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
  *
- * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
- *	(for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry using %NL80211_ATTR_MAC
+ *	(for the BSSID), %NL80211_ATTR_PMKID, and optionally %NL80211_ATTR_PMK
+ *	(PMK is used for PTKSA derivation in case of FILS shared key offload) or
+ *	using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
+ *	%NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
+ *	authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
+ *	advertised by a FILS capable AP identifying the scope of the PMKSA in an
+ *	ESS.
  * @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
- *	(for the BSSID) and %NL80211_ATTR_PMKID.
+ *	(for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
+ *	%NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
+ *	authentication.
  * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
  *
  * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -600,6 +644,20 @@
  *
  * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface.
  *
+ * @NL80211_CMD_SET_MULTICAST_TO_UNICAST: Configure if this AP should perform
+ *	multicast to unicast conversion. When enabled, all multicast packets
+ *	with ethertype ARP, IPv4 or IPv6 (possibly within an 802.1Q header)
+ *	will be sent out to each station once with the destination (multicast)
+ *	MAC address replaced by the station's MAC address. Note that this may
+ *	break certain expectations of the receiver, e.g. the ability to drop
+ *	unicast IP packets encapsulated in multicast L2 frames, or the ability
+ *	to not send destination unreachable messages in such cases.
+ *	This can only be toggled per BSS. Configure this on an interface of
+ *	type %NL80211_IFTYPE_AP. It applies to all its VLAN interfaces
+ *	(%NL80211_IFTYPE_AP_VLAN), except for those in 4addr (WDS) mode.
+ *	If %NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED is not present with this
+ *	command, the feature is disabled.
+ *
  * @NL80211_CMD_JOIN_MESH: Join a mesh. The mesh ID must be given, and initial
  *	mesh config parameters may be given.
  * @NL80211_CMD_LEAVE_MESH: Leave the mesh network -- no special arguments, the
@@ -874,6 +932,12 @@
  *	This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
  *	%NL80211_ATTR_COOKIE.
  *
+ * @NL80211_CMD_UPDATE_CONNECT_PARAMS: Update one or more connect parameters
+ *	for subsequent roaming cases if the driver or firmware uses internal
+ *	BSS selection. This command can be issued only while connected and it
+ *	does not result in a change for the current association. Currently,
+ *	only the %NL80211_ATTR_IE data is used and updated with this command.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -1069,6 +1133,10 @@
 	NL80211_CMD_CHANGE_NAN_CONFIG,
 	NL80211_CMD_NAN_MATCH,
 
+	NL80211_CMD_SET_MULTICAST_TO_UNICAST,
+
+	NL80211_CMD_UPDATE_CONNECT_PARAMS,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -1638,8 +1706,16 @@
  *	the connection request from a station. nl80211_connect_failed_reason
  *	enum has different reasons of connection failure.
  *
- * @NL80211_ATTR_SAE_DATA: SAE elements in Authentication frames. This starts
- *	with the Authentication transaction sequence number field.
+ * @NL80211_ATTR_AUTH_DATA: Fields and elements in Authentication frames.
+ *	This contains the authentication frame body (non-IE and IE data),
+ *	excluding the Authentication algorithm number, i.e., starting at the
+ *	Authentication transaction sequence number field. It is used with
+ *	authentication algorithms that need special fields to be added into
+ *	the frames (SAE and FILS). Currently, only the SAE cases use the
+ *	initial two fields (Authentication transaction sequence number and
+ *	Status code). However, those fields are included in the attribute data
+ *	for all authentication algorithms to keep the attribute definition
+ *	consistent.
  *
  * @NL80211_ATTR_VHT_CAPABILITY: VHT Capability information element (from
  *	association request when used with NL80211_CMD_NEW_STATION)
@@ -1936,10 +2012,61 @@
  *	attribute.
  * @NL80211_ATTR_NAN_MATCH: used to report a match. This is a nested attribute.
  *	See &enum nl80211_nan_match_attributes.
+ * @NL80211_ATTR_FILS_KEK: KEK for FILS (Re)Association Request/Response frame
+ *	protection.
+ * @NL80211_ATTR_FILS_NONCES: Nonces (part of AAD) for FILS (Re)Association
+ *	Request/Response frame protection. This attribute contains the 16 octet
+ *	STA Nonce followed by 16 octets of AP Nonce.
+ *
+ * @NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED: Indicates whether or not multicast
+ *	packets should be sent out as unicast to all stations (flag attribute).
  *
  * @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also
  *	used in various commands/events for specifying the BSSID.
  *
+ * @NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI: Relative RSSI threshold by which
+ *	other BSSs have to be better or slightly worse than the current
+ *	connected BSS so that they get reported to user space.
+ *	This will give an opportunity to userspace to consider connecting to
+ *	other matching BSSs which have better or slightly worse RSSI than
+ *	the current connected BSS by using an offloaded operation to avoid
+ *	unnecessary wakeups.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present, the RSSI level for BSSs in
+ *	the specified band is to be adjusted before doing
+ *	%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out
+ *	better BSSs. The attribute value is a packed structure
+ *	value as specified by &struct nl80211_bss_select_rssi_adjust.
+ *
+ * @NL80211_ATTR_TIMEOUT_REASON: The reason for which an operation timed out.
+ *	u32 attribute with an &enum nl80211_timeout_reason value. This is used,
+ *	e.g., with %NL80211_CMD_CONNECT event.
+ *
+ * @NL80211_ATTR_FILS_ERP_USERNAME: EAP Re-authentication Protocol (ERP)
+ *	username part of the NAI used to refer to keys rRK and rIK. This is used
+ *	with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_REALM: EAP Re-authentication Protocol (ERP) realm part
+ *	of NAI specifying the domain name of the ER server. This is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM: Unsigned 16-bit ERP next sequence number
+ *	to use in ERP messages. This is used in generating the FILS wrapped data
+ *	for FILS authentication and is used with %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_RRK: ERP re-authentication Root Key (rRK) for the
+ *	NAI specified by %NL80211_ATTR_FILS_ERP_USERNAME and
+ *	%NL80211_ATTR_FILS_ERP_REALM. This is used for generating rIK and rMSK
+ *	from successful FILS authentication and is used with
+ *	%NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP
+ *	identifying the scope of PMKSAs. This is used with
+ *	@NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
+ *
+ * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
+ *	This is used with @NL80211_CMD_SET_PMKSA.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2198,7 +2325,7 @@
 
 	NL80211_ATTR_CONN_FAILED_REASON,
 
-	NL80211_ATTR_SAE_DATA,
+	NL80211_ATTR_AUTH_DATA,
 
 	NL80211_ATTR_VHT_CAPABILITY,
 
@@ -2339,8 +2466,26 @@
 	NL80211_ATTR_NAN_FUNC,
 	NL80211_ATTR_NAN_MATCH,
 
+	NL80211_ATTR_FILS_KEK,
+	NL80211_ATTR_FILS_NONCES,
+
+	NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED,
+
 	NL80211_ATTR_BSSID,
 
+	NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+	NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+
+	NL80211_ATTR_TIMEOUT_REASON,
+
+	NL80211_ATTR_FILS_ERP_USERNAME,
+	NL80211_ATTR_FILS_ERP_REALM,
+	NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+	NL80211_ATTR_FILS_ERP_RRK,
+	NL80211_ATTR_FILS_CACHE_ID,
+
+	NL80211_ATTR_PMK,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -2352,6 +2497,7 @@
 #define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION
 #define	NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
 #define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
+#define NL80211_ATTR_SAE_DATA NL80211_ATTR_AUTH_DATA
 
 /*
  * Allow user space programs to use #ifdef on new attributes by defining them
@@ -3032,6 +3178,13 @@
  *	how this API was implemented in the past. Also, due to the same problem,
  *	the only way to create a matchset with only an RSSI filter (with this
  *	attribute) is if there's only a single matchset with the RSSI attribute.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI: Flag indicating whether
+ *	%NL80211_SCHED_SCAN_MATCH_ATTR_RSSI is to be used as absolute RSSI or
+ *	relative to the current BSS's RSSI.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST: When present, the RSSI level for
+ *	BSSs in the specified band is to be adjusted before doing
+ *	RSSI-based BSS selection. The attribute value is a packed structure
+ *	value as specified by &struct nl80211_bss_select_rssi_adjust.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
  *	attribute number currently defined
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3041,6 +3194,8 @@
 
 	NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
 	NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
+	NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
+	NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
 
 	/* keep last */
 	__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -3665,6 +3820,9 @@
  * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r)
  * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP)
  * @NL80211_AUTHTYPE_SAE: Simultaneous authentication of equals
+ * @NL80211_AUTHTYPE_FILS_SK: Fast Initial Link Setup shared key
+ * @NL80211_AUTHTYPE_FILS_SK_PFS: Fast Initial Link Setup shared key with PFS
+ * @NL80211_AUTHTYPE_FILS_PK: Fast Initial Link Setup public key
  * @__NL80211_AUTHTYPE_NUM: internal
  * @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm
  * @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by
@@ -3677,6 +3835,9 @@
 	NL80211_AUTHTYPE_FT,
 	NL80211_AUTHTYPE_NETWORK_EAP,
 	NL80211_AUTHTYPE_SAE,
+	NL80211_AUTHTYPE_FILS_SK,
+	NL80211_AUTHTYPE_FILS_SK_PFS,
+	NL80211_AUTHTYPE_FILS_PK,
 
 	/* keep last */
 	__NL80211_AUTHTYPE_NUM,
@@ -4643,6 +4804,20 @@
  *	configuration (AP/mesh) with HT rates.
  * @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
  *	configuration (AP/mesh) with VHT rates.
+ * @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup
+ *	with user space SME (NL80211_CMD_AUTHENTICATE) in station mode.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA: This driver supports randomized TA
+ *	in @NL80211_CMD_FRAME while not associated.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports
+ *	randomized TA in @NL80211_CMD_FRAME while associated.
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
+ *	for reporting BSSs with better RSSI than the current connected BSS
+ *	(%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
+ * @NL80211_EXT_FEATURE_CQM_RSSI_LIST: With this driver the
+ *	%NL80211_ATTR_CQM_RSSI_THOLD attribute accepts a list of zero or more
+ *	RSSI threshold values to monitor rather than exactly one threshold.
+ * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
+ *	authentication with %NL80211_CMD_CONNECT.
  *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4657,6 +4832,12 @@
 	NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
 	NL80211_EXT_FEATURE_BEACON_RATE_HT,
 	NL80211_EXT_FEATURE_BEACON_RATE_VHT,
+	NL80211_EXT_FEATURE_FILS_STA,
+	NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
+	NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
+	NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
+	NL80211_EXT_FEATURE_CQM_RSSI_LIST,
+	NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
 
 	/* add new features before the definition below */
 	NUM_NL80211_EXT_FEATURES,
@@ -4696,6 +4877,21 @@
 };
 
 /**
+ * enum nl80211_timeout_reason - timeout reasons
+ *
+ * @NL80211_TIMEOUT_UNSPECIFIED: Timeout reason unspecified.
+ * @NL80211_TIMEOUT_SCAN: Scan (AP discovery) timed out.
+ * @NL80211_TIMEOUT_AUTH: Authentication timed out.
+ * @NL80211_TIMEOUT_ASSOC: Association timed out.
+ */
+enum nl80211_timeout_reason {
+	NL80211_TIMEOUT_UNSPECIFIED,
+	NL80211_TIMEOUT_SCAN,
+	NL80211_TIMEOUT_AUTH,
+	NL80211_TIMEOUT_ASSOC,
+};
+
+/**
  * enum nl80211_scan_flags -  scan request control flags
  *
  * Scan request control flags are used to control the handling
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index a8acc24..0e5ce0d 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -1051,6 +1051,30 @@
  */
 #define USB_DT_USB_SSP_CAP_SIZE(ssac)	(16 + ssac * 4)
 
+/*
+ * Configuration Summary descriptors: Defines a list of functions in the
+ * configuration. This descriptor may be used by Host software to decide
+ * which Configuration to use to obtain the desired functionality.
+ */
+#define	USB_CAP_TYPE_CONFIG_SUMMARY	0x10
+
+struct function_class_info {
+	__u8 bClass;
+	__u8 bSubClass;
+	__u8 bProtocol;
+};
+
+struct usb_config_summary_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDevCapabilityType;
+	__u16 bcdVersion;
+	__u8 bConfigurationValue;
+	__u8 bMaxPower;
+	__u8 bNumFunctions;
+	struct function_class_info cs_info[];
+} __attribute__((packed));
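A hypothetical sizing helper (not defined by the spec or this patch), shown because bLength must cover the fixed header plus one function_class_info entry per function:

#define USB_CONFIG_SUMMARY_DESC_SIZE(n) \
	(sizeof(struct usb_config_summary_descriptor) + \
	 (n) * sizeof(struct function_class_info))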
+
 /*-------------------------------------------------------------------------*/
 
 /* USB_DT_WIRELESS_ENDPOINT_COMP:  companion descriptor associated with
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 98844ac..4ded0a4 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -4,6 +4,7 @@
 header-y += cam_isp.h
 header-y += cam_isp_vfe.h
 header-y += cam_isp_ife.h
+header-y += cam_jpeg.h
 header-y += cam_req_mgr.h
 header-y += cam_sensor.h
 header-y += cam_sync.h
diff --git a/include/uapi/media/cam_jpeg.h b/include/uapi/media/cam_jpeg.h
new file mode 100644
index 0000000..f3082f3
--- /dev/null
+++ b/include/uapi/media/cam_jpeg.h
@@ -0,0 +1,117 @@
+#ifndef __UAPI_CAM_JPEG_H__
+#define __UAPI_CAM_JPEG_H__
+
+#include "cam_defs.h"
+
+/* enc, dma, cdm(enc/dma) are used in querycap */
+#define CAM_JPEG_DEV_TYPE_ENC      0
+#define CAM_JPEG_DEV_TYPE_DMA      1
+#define CAM_JPEG_DEV_TYPE_MAX      2
+
+#define CAM_JPEG_NUM_DEV_PER_RES_MAX      1
+
+/* definitions needed for jpeg acquire device */
+#define CAM_JPEG_RES_TYPE_ENC        0
+#define CAM_JPEG_RES_TYPE_DMA        1
+#define CAM_JPEG_RES_TYPE_MAX        2
+
+/* packet opcode types */
+#define CAM_JPEG_OPCODE_ENC_UPDATE 0
+#define CAM_JPEG_OPCODE_DMA_UPDATE 1
+
+/* ENC input port resource type */
+#define CAM_JPEG_ENC_INPUT_IMAGE                 0x0
+
+/* ENC output port resource type */
+#define CAM_JPEG_ENC_OUTPUT_IMAGE                0x1
+
+#define CAM_JPEG_ENC_IO_IMAGES_MAX               0x2
+
+/* DMA input port resource type */
+#define CAM_JPEG_DMA_INPUT_IMAGE                 0x0
+
+/* DMA output port resource type */
+#define CAM_JPEG_DMA_OUTPUT_IMAGE                0x1
+
+#define CAM_JPEG_DMA_IO_IMAGES_MAX               0x2
+
+#define CAM_JPEG_IMAGE_MAX                       0x2
+
+/**
+ * struct cam_jpeg_dev_ver - Device information for particular hw type
+ *
+ * This is used to get the device version info of JPEG ENC and JPEG DMA
+ * from hardware and report it in the CAM_QUERY_CAP IOCTL
+ *
+ * @size : Size of struct passed
+ * @dev_type: Hardware type for the cap info (jpeg enc, jpeg dma)
+ * @hw_ver: Major, minor and incr values of a device version
+ */
+struct cam_jpeg_dev_ver {
+	uint32_t size;
+	uint32_t dev_type;
+	struct cam_hw_version hw_ver;
+};
+
+/**
+ * struct cam_jpeg_query_cap_cmd - JPEG query device capability payload
+ *
+ * @dev_iommu_handle: Jpeg iommu handles for secure/non secure
+ *      modes
+ * @cdm_iommu_handle: Iommu handles for secure/non secure modes
+ * @num_enc: Number of encoders
+ * @num_dma: Number of DMA engines
+ * @dev_ver: Returned device capability array
+ */
+struct cam_jpeg_query_cap_cmd {
+	struct cam_iommu_handle dev_iommu_handle;
+	struct cam_iommu_handle cdm_iommu_handle;
+	uint32_t num_enc;
+	uint32_t num_dma;
+	struct cam_jpeg_dev_ver dev_ver[CAM_JPEG_DEV_TYPE_MAX];
+};
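Illustrative userspace consumption of a filled query-cap payload, assuming struct cam_hw_version exposes major/minor/incr fields as the comment above describes (how the payload is obtained via CAM_QUERY_CAP is driver/UMD specific and not shown):

static void print_jpeg_caps(const struct cam_jpeg_query_cap_cmd *cap)
{
	uint32_t i;

	printf("encoders=%u dma=%u\n", cap->num_enc, cap->num_dma);
	for (i = 0; i < CAM_JPEG_DEV_TYPE_MAX; i++)
		printf("dev_type=%u version=%u.%u.%u\n",
		       cap->dev_ver[i].dev_type,
		       cap->dev_ver[i].hw_ver.major,
		       cap->dev_ver[i].hw_ver.minor,
		       cap->dev_ver[i].hw_ver.incr);
}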
+
+/**
+ * struct cam_jpeg_res_info - JPEG output resource info
+ *
+ * @format: Format of the resource
+ * @width:  Width in pixels
+ * @height: Height in lines
+ * @fps:  Frames per second
+ */
+struct cam_jpeg_res_info {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	uint32_t fps;
+};
+
+/**
+ * struct cam_jpeg_acquire_dev_info - JPEG acquire device info
+ *
+ * @dev_type: Device type (ENC/DMA)
+ * @reserved: Reserved Bytes
+ * @in_res: Input resource info
+ * @out_res: Output resource info
+ */
+struct cam_jpeg_acquire_dev_info {
+	uint32_t dev_type;
+	uint32_t reserved;
+	struct cam_jpeg_res_info in_res;
+	struct cam_jpeg_res_info out_res;
+};
+
+/**
+ * struct cam_jpeg_config_inout_param_info - JPEG Config time
+ *     input output params
+ *
+ * @clk_index: Input param - clock selection index (-1 = default)
+ * @output_size: Output Param - jpeg encode/dma output size in
+ *     bytes
+ */
+struct cam_jpeg_config_inout_param_info {
+	int32_t clk_index;
+	int32_t output_size;
+};
+
+#endif /* __UAPI_CAM_JPEG_H__ */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index e073c5c..23a8ccf 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -22,6 +22,7 @@
 #define CAM_ACTUATOR_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 9)
 #define CAM_CCI_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 10)
 #define CAM_FLASH_DEVICE_TYPE     (CAM_DEVICE_TYPE_BASE + 11)
+#define CAM_EEPROM_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 12)
 
 /* cam_req_mgr hdl info */
 #define CAM_REQ_MGR_HDL_IDX_POS           8
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
index 83f1a02..ac370ba 100644
--- a/include/uapi/media/cam_sensor.h
+++ b/include/uapi/media/cam_sensor.h
@@ -6,7 +6,7 @@
 #include <media/cam_defs.h>
 
 #define CAM_SENSOR_PROBE_CMD   (CAM_COMMON_OPCODE_MAX + 1)
-#define CAM_SENSOR_MAX_LED_TRIGGERS 3
+#define CAM_FLASH_MAX_LED_TRIGGERS 3
 /**
  * struct cam_sensor_query_cap - capabilities info for sensor
  *
@@ -63,6 +63,18 @@
 } __attribute__((packed));
 
 /**
+ * struct cam_eeprom_query_cap_t - capabilities info for eeprom
+ *
+ * @slot_info                  :  Indicates the slot id or cell index
+ * @eeprom_kernel_probe        :  Indicates whether the probe is done in kernel or userspace
+ */
+struct cam_eeprom_query_cap_t {
+	uint32_t            slot_info;
+	uint16_t            eeprom_kernel_probe;
+	uint16_t            reserved;
+} __attribute__((packed));
+
+/**
  * struct cam_cmd_i2c_info - Contains slave I2C related info
  *
  * @slave_addr      :    Slave address
@@ -360,7 +372,7 @@
 	uint16_t    reserved;
 	uint32_t    led_on_delay_ms;
 	uint32_t    led_off_delay_ms;
-	uint32_t    led_current_ma[CAM_SENSOR_MAX_LED_TRIGGERS];
+	uint32_t    led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
 } __attribute__((packed));
 
 /**
@@ -379,7 +391,7 @@
 	uint16_t    count;
 	uint8_t     opcode;
 	uint8_t     cmd_type;
-	uint32_t    led_current_ma[CAM_SENSOR_MAX_LED_TRIGGERS];
+	uint32_t    led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
 } __attribute__((packed));
 
 /**
@@ -409,9 +421,9 @@
  */
 struct cam_flash_query_cap_info {
 	uint32_t    slot_info;
-	uint32_t    max_current_flash[CAM_SENSOR_MAX_LED_TRIGGERS];
-	uint32_t    max_duration_flash[CAM_SENSOR_MAX_LED_TRIGGERS];
-	uint32_t    max_current_torch[CAM_SENSOR_MAX_LED_TRIGGERS];
+	uint32_t    max_current_flash[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t    max_duration_flash[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t    max_current_torch[CAM_FLASH_MAX_LED_TRIGGERS];
 } __attribute__ ((packed));
 
 #endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 44c17f4..8ce679d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -885,6 +885,11 @@
 	if (err)
 		return err;
 
+	if (is_pointer_value(env, insn->src_reg)) {
+		verbose("R%d leaks addr into mem\n", insn->src_reg);
+		return -EACCES;
+	}
+
 	/* check whether atomic_add can read the memory */
 	err = check_mem_access(env, insn->dst_reg, insn->off,
 			       BPF_SIZE(insn->code), BPF_READ, -1);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e2ac135..2ac75e2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7112,21 +7112,6 @@
 	perf_output_end(&handle);
 }
 
-static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
-{
-	/*
-	 * Due to interrupt latency (AKA "skid"), we may enter the
-	 * kernel before taking an overflow, even if the PMU is only
-	 * counting user events.
-	 * To avoid leaking information to userspace, we must always
-	 * reject kernel samples when exclude_kernel is set.
-	 */
-	if (event->attr.exclude_kernel && !user_mode(regs))
-		return false;
-
-	return true;
-}
-
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7174,12 +7159,6 @@
 	}
 
 	/*
-	 * For security, drop the skid kernel samples if necessary.
-	 */
-	if (!sample_is_allowed(event, regs))
-		return ret;
-
-	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
 	 */
diff --git a/kernel/extable.c b/kernel/extable.c
index e820cce..4f06fc3 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -66,7 +66,7 @@
 	return 0;
 }
 
-int core_kernel_text(unsigned long addr)
+int notrace core_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext &&
 	    addr < (unsigned long)_etext)
diff --git a/kernel/fork.c b/kernel/fork.c
index f90327b..52e5505 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1015,8 +1015,7 @@
 
 	mm = get_task_mm(task);
 	if (mm && mm != current->mm &&
-			!ptrace_may_access(task, mode) &&
-			!capable(CAP_SYS_RESOURCE)) {
+			!ptrace_may_access(task, mode)) {
 		mmput(mm);
 		mm = ERR_PTR(-EACCES);
 	}
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 56583e7..e3944c4 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1767,6 +1767,7 @@
 	if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
 		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
 		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
+		smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
 		swake_up(&rdp_leader->nocb_wq);
 	}
 }
@@ -2021,6 +2022,7 @@
 	 * nocb_gp_head, where they await a grace period.
 	 */
 	gotcbs = false;
+	smp_mb(); /* wakeup before ->nocb_head reads. */
 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
 		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
 		if (!rdp->nocb_gp_head)
diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c
index 1a3309b..1ccd19d 100644
--- a/kernel/sched/boost.c
+++ b/kernel/sched/boost.c
@@ -32,7 +32,7 @@
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags))
+	if (!test_and_set_bit(BOOST_KICK, &rq->walt_flags))
 		smp_send_reschedule(cpu);
 }
 
@@ -57,14 +57,14 @@
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
 
-	return test_bit(BOOST_KICK, &rq->hmp_flags);
+	return test_bit(BOOST_KICK, &rq->walt_flags);
 }
 
 void clear_boost_kick(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	clear_bit(BOOST_KICK, &rq->hmp_flags);
+	clear_bit(BOOST_KICK, &rq->walt_flags);
 }
 
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 46e4643..d706c96 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1283,6 +1283,8 @@
 #endif
 #endif
 
+	trace_sched_migrate_task(p, new_cpu, task_util(p));
+
 	if (task_cpu(p) != new_cpu) {
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p);
@@ -1679,7 +1681,7 @@
 	return cpu;
 }
 
-void update_avg(u64 *avg, u64 sample)
+static void update_avg(u64 *avg, u64 sample)
 {
 	s64 diff = sample - *avg;
 	*avg += diff >> 3;
@@ -2187,9 +2189,6 @@
 		notif_required = true;
 	}
 
-	if (!__task_in_cum_window_demand(cpu_rq(cpu), p))
-		inc_cum_window_demand(cpu_rq(cpu), p, task_load(p));
-
 	note_task_waking(p, wallclock);
 #endif /* CONFIG_SMP */
 
@@ -2199,6 +2198,15 @@
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
+	if (success && sched_predl) {
+		raw_spin_lock_irqsave(&cpu_rq(cpu)->lock, flags);
+		if (do_pl_notif(cpu_rq(cpu)))
+			cpufreq_update_util(cpu_rq(cpu),
+					    SCHED_CPUFREQ_WALT |
+					    SCHED_CPUFREQ_PL);
+		raw_spin_unlock_irqrestore(&cpu_rq(cpu)->lock, flags);
+	}
+
 	return success;
 }
 
@@ -2249,8 +2257,6 @@
 
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
-		if (!__task_in_cum_window_demand(rq, p))
-			inc_cum_window_demand(rq, p, task_load(p));
 		cpufreq_update_util(rq, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 		note_task_waking(p, wallclock);
@@ -5958,7 +5964,7 @@
 	 */
 	nohz_balance_clear_nohz_mask(cpu);
 
-	clear_hmp_request(cpu);
+	clear_walt_request(cpu);
 	local_irq_enable();
 	return 0;
 }
@@ -6691,6 +6697,9 @@
  * Build an iteration mask that can exclude certain CPUs from the upwards
  * domain traversal.
  *
+ * Only CPUs that can arrive at this group should be considered to continue
+ * balancing.
+ *
  * Asymmetric node setups can result in situations where the domain tree is of
  * unequal depth, make sure to skip domains that already cover the entire
  * range.
@@ -6702,18 +6711,31 @@
  */
 static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
 {
-	const struct cpumask *span = sched_domain_span(sd);
+	const struct cpumask *sg_span = sched_group_cpus(sg);
 	struct sd_data *sdd = sd->private;
 	struct sched_domain *sibling;
 	int i;
 
-	for_each_cpu(i, span) {
+	for_each_cpu(i, sg_span) {
 		sibling = *per_cpu_ptr(sdd->sd, i);
-		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+		/*
+		 * Can happen in the asymmetric case, where these siblings are
+		 * unused. The mask will not be empty because those CPUs that
+		 * do have the top domain _should_ span the domain.
+		 */
+		if (!sibling->child)
+			continue;
+
+		/* If we would not end up here, we can't continue from here */
+		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
 			continue;
 
 		cpumask_set_cpu(i, sched_group_mask(sg));
 	}
+
+	/* We must not have empty masks here */
+	WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
 }
 
 /*
@@ -6737,7 +6759,7 @@
 
 	cpumask_clear(covered);
 
-	for_each_cpu(i, span) {
+	for_each_cpu_wrap(i, span, cpu) {
 		struct cpumask *sg_span;
 
 		if (cpumask_test_cpu(i, covered))
@@ -8068,7 +8090,7 @@
 	BUG_ON(rq->nr_running != 1);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-	clear_hmp_request(cpu);
+	clear_walt_request(cpu);
 
 	calc_load_migrate(rq);
 	update_max_interval();
@@ -8078,22 +8100,6 @@
 }
 #endif
 
-#ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-
-static void sched_init_smt(void)
-{
-	/*
-	 * We've enumerated all CPUs and will assume that if any CPU
-	 * has SMT siblings, CPU0 will too.
-	 */
-	if (cpumask_weight(cpu_smt_mask(0)) > 1)
-		static_branch_enable(&sched_smt_present);
-}
-#else
-static inline void sched_init_smt(void) { }
-#endif
-
 void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
@@ -8125,9 +8131,6 @@
 
 	init_sched_rt_class();
 	init_sched_dl_class();
-
-	sched_init_smt();
-
 	sched_smp_initialized = true;
 }
 
@@ -8308,7 +8311,6 @@
 		rq->avg_idle = 2*sysctl_sched_migration_cost;
 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
 		rq->push_task = NULL;
-
 		walt_sched_init(rq);
 
 		INIT_LIST_HEAD(&rq->cfs_tasks);
@@ -9578,8 +9580,13 @@
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	dequeue_task(rq, p, 0);
+	/*
+	 * task's contribution is already removed from the
+	 * cumulative window demand in dequeue. As the
+	 * task's stats are reset, the next enqueue does
+	 * not change the cumulative window demand.
+	 */
 	reset_task_stats(p);
-	dec_cum_window_demand(rq, p);
 	p->ravg.mark_start = wallclock;
 	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
 	free_task_load_ptrs(p);
@@ -9589,3 +9596,5 @@
 	task_rq_unlock(rq, p, &rf);
 }
 #endif /* CONFIG_SCHED_WALT */
+
+__read_mostly bool sched_predl;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 10a807c..08d4511 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -19,41 +19,6 @@
 
 #include <linux/slab.h>
 
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
-{
-	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
-{
-	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
-			 u32 new_task_load, u32 new_pred_demand)
-{
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
-				      pred_demand_delta);
-}
-
-#else	/* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
-
-#endif	/* CONFIG_SCHED_WALT */
-
 struct dl_bandwidth def_dl_bandwidth;
 
 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -865,7 +830,7 @@
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
-	inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
+	walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -880,7 +845,7 @@
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
-	dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
+	walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -1845,7 +1810,7 @@
 
 	.update_curr		= update_curr_dl,
 #ifdef CONFIG_SCHED_WALT
-	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_dl,
+	.fixup_walt_sched_stats	= fixup_walt_sched_stats_common,
 #endif
 };
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index b520691..0f8c0b2 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -600,12 +600,6 @@
 			cfs_rq->throttle_count);
 	SEQ_printf(m, "  .%-30s: %d\n", "runtime_enabled",
 			cfs_rq->runtime_enabled);
-#ifdef CONFIG_SCHED_WALT
-	SEQ_printf(m, "  .%-30s: %d\n", "nr_big_tasks",
-			cfs_rq->hmp_stats.nr_big_tasks);
-	SEQ_printf(m, "  .%-30s: %llu\n", "cumulative_runnable_avg",
-			cfs_rq->hmp_stats.cumulative_runnable_avg);
-#endif
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -706,10 +700,10 @@
 	P(cluster->max_freq);
 	P(cluster->exec_scale_factor);
 #ifdef CONFIG_SCHED_WALT
-	P(hmp_stats.nr_big_tasks);
+	P(walt_stats.nr_big_tasks);
 #endif
-	SEQ_printf(m, "  .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
-			rq->hmp_stats.cumulative_runnable_avg);
+	SEQ_printf(m, "  .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
+			rq->walt_stats.cumulative_runnable_avg);
 #endif
 #undef P
 #undef PN
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e0ab4d6..b31522e 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,50 +37,39 @@
 #include "walt.h"
 #include <trace/events/sched.h>
 
-/* QHMP/Zone forward declarations */
-
-struct lb_env;
-struct sd_lb_stats;
-struct sg_lb_stats;
-
 #ifdef CONFIG_SCHED_WALT
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
-				       u32 new_task_load, u32 new_pred_demand);
-#endif
 
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-				 struct task_struct *p, int change_cra) { }
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-				 struct task_struct *p, int change_cra) { }
-static inline void dec_throttled_cfs_rq_hmp_stats(
-				struct hmp_sched_stats *stats,
-				struct cfs_rq *cfs_rq) { }
-static inline void inc_throttled_cfs_rq_hmp_stats(
-				struct hmp_sched_stats *stats,
-				struct cfs_rq *cfs_rq) { }
-static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
-
-#ifdef CONFIG_SMP
-
-static inline int
-bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
-{
-	return 0;
-}
-
-static inline bool update_sd_pick_busiest_active_balance(struct lb_env *env,
-						  struct sd_lb_stats *sds,
-						  struct sched_group *sg,
-						  struct sg_lb_stats *sgs)
-{
-	return false;
-}
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_SCHED_WALT
 static inline bool task_fits_max(struct task_struct *p, int cpu);
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+					u32 new_task_load, u32 new_pred_demand);
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+				    int delta, bool inc);
+#endif /* CONFIG_SCHED_WALT */
+
+#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
+
+static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq);
+static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq,
+				  struct task_struct *p);
+static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq,
+				  struct task_struct *p);
+static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+					    struct cfs_rq *cfs_rq);
+static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+					    struct cfs_rq *cfs_rq);
+#else
+static inline void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq) {}
+static inline void
+walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+static inline void
+walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+
+#define walt_inc_throttled_cfs_rq_stats(...)
+#define walt_dec_throttled_cfs_rq_stats(...)
+
 #endif
 
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
@@ -4047,7 +4036,7 @@
 		if (dequeue)
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 		qcfs_rq->h_nr_running -= task_delta;
-		dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
+		walt_dec_throttled_cfs_rq_stats(&qcfs_rq->walt_stats, cfs_rq);
 
 		if (qcfs_rq->load.weight)
 			dequeue = 0;
@@ -4055,7 +4044,7 @@
 
 	if (!se) {
 		sub_nr_running(rq, task_delta);
-		dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
+		walt_dec_throttled_cfs_rq_stats(&rq->walt_stats, cfs_rq);
 	}
 
 	cfs_rq->throttled = 1;
@@ -4077,11 +4066,6 @@
 		start_cfs_bandwidth(cfs_b);
 
 	raw_spin_unlock(&cfs_b->lock);
-
-	/* Log effect on hmp stats after throttling */
-	trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
-				sched_irqload(cpu_of(rq)),
-				power_cost(cpu_of(rq), 0));
 }
 
 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
@@ -4119,7 +4103,7 @@
 		if (enqueue)
 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
-		inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
+		walt_inc_throttled_cfs_rq_stats(&cfs_rq->walt_stats, tcfs_rq);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -4127,17 +4111,12 @@
 
 	if (!se) {
 		add_nr_running(rq, task_delta);
-		inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
+		walt_inc_throttled_cfs_rq_stats(&rq->walt_stats, tcfs_rq);
 	}
 
 	/* determine whether we need to wake up potentially idle cpu */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
 		resched_curr(rq);
-
-	/* Log effect on hmp stats after un-throttling */
-	trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
-				sched_irqload(cpu_of(rq)),
-				power_cost(cpu_of(rq), 0));
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -4475,7 +4454,7 @@
 {
 	cfs_rq->runtime_enabled = 0;
 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
-	init_cfs_rq_hmp_stats(cfs_rq);
+	walt_init_cfs_rq_stats(cfs_rq);
 }
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -4670,6 +4649,9 @@
 	int task_wakeup = flags & ENQUEUE_WAKEUP;
 #endif
 
+#ifdef CONFIG_SCHED_WALT
+	p->misfit = !task_fits_max(p, rq->cpu);
+#endif
 	/*
 	 * If in_iowait is set, the code below may not trigger any cpufreq
 	 * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -4693,7 +4675,7 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running++;
-		inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
+		walt_inc_cfs_rq_stats(cfs_rq, p);
 
 		flags = ENQUEUE_WAKEUP;
 	}
@@ -4701,7 +4683,7 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running++;
-		inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
+		walt_inc_cfs_rq_stats(cfs_rq, p);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -4712,10 +4694,7 @@
 
 	if (!se) {
 		add_nr_running(rq, 1);
-#ifdef CONFIG_SCHED_WALT
-		p->misfit = !task_fits_max(p, rq->cpu);
-#endif
-		inc_rq_hmp_stats(rq, p, 1);
+		inc_rq_walt_stats(rq, p);
 	}
 
 #ifdef CONFIG_SMP
@@ -4771,7 +4750,7 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running--;
-		dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
+		walt_dec_cfs_rq_stats(cfs_rq, p);
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -4791,7 +4770,7 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running--;
-		dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
+		walt_dec_cfs_rq_stats(cfs_rq, p);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -4802,7 +4781,7 @@
 
 	if (!se) {
 		sub_nr_running(rq, 1);
-		dec_rq_hmp_stats(rq, p, 1);
+		dec_rq_walt_stats(rq, p);
 	}
 
 #ifdef CONFIG_SMP
@@ -5344,18 +5323,6 @@
 	return DIV_ROUND_UP(util << SCHED_CAPACITY_SHIFT, capacity);
 }
 
-static inline int task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
-		u64 demand = p->ravg.demand;
-
-		return (demand << 10) / sched_ravg_window;
-	}
-#endif
-	return p->se.avg.util_avg;
-}
-
 static inline bool
 bias_to_waker_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
 {
@@ -6177,43 +6144,6 @@
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
-	int next;
-
-again:
-	next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
-	if (*wrapped) {
-		if (next >= start)
-			return nr_cpumask_bits;
-	} else {
-		if (next >= nr_cpumask_bits) {
-			*wrapped = 1;
-			n = -1;
-			goto again;
-		}
-	}
-
-	return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap)				\
-	for ((wrap) = 0, (cpu) = (start)-1;					\
-		(cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),	\
-		(cpu) < nr_cpumask_bits; )
-
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
@@ -6243,7 +6173,7 @@
  * Since SMT siblings share all cache levels, inspecting this limited remote
  * state should be fairly cheap.
  */
-void __update_idle_core(struct rq *rq)
+void update_idle_core(struct rq *rq)
 {
 	int core = cpu_of(rq);
 	int cpu;
@@ -6273,17 +6203,14 @@
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-	int core, cpu, wrap;
-
-	if (!static_branch_likely(&sched_smt_present))
-		return -1;
+	int core, cpu;
 
 	if (!test_idle_cores(target, false))
 		return -1;
 
 	cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
 
-	for_each_cpu_wrap(core, cpus, target, wrap) {
+	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
 		for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -6311,9 +6238,6 @@
 {
 	int cpu;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return -1;
-
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
@@ -6351,7 +6275,7 @@
 	u64 avg_cost, avg_idle = this_rq()->avg_idle;
 	u64 time, cost;
 	s64 delta;
-	int cpu, wrap;
+	int cpu;
 
 	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
 	if (!this_sd)
@@ -6368,7 +6292,7 @@
 
 	time = local_clock();
 
-	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
 		if (cpu_isolated(cpu))
@@ -8657,9 +8581,6 @@
 {
 	struct sg_lb_stats *busiest = &sds->busiest_stat;
 
-	if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs))
-		return true;
-
 	if (sgs->group_type > busiest->group_type)
 		return true;
 
@@ -9123,9 +9044,6 @@
 	if (env->flags & LBF_BIG_TASK_ACTIVE_BALANCE)
 		goto force_balance;
 
-	if (bail_inter_cluster_balance(env, &sds))
-		goto out_balanced;
-
 	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
 						/ sds.total_capacity;
 
@@ -10458,7 +10376,7 @@
 	rq->misfit_task = misfit;
 
 	if (old_misfit != misfit) {
-		adjust_nr_big_tasks(&rq->hmp_stats, 1, misfit);
+		walt_fixup_nr_big_tasks(rq, curr, 1, misfit);
 		curr->misfit = misfit;
 	}
 #endif
@@ -10923,7 +10841,7 @@
 	.task_change_group	= task_change_group_fair,
 #endif
 #ifdef CONFIG_SCHED_WALT
-	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_fair,
+	.fixup_walt_sched_stats	= walt_fixup_sched_stats_fair,
 #endif
 };
 
@@ -10973,70 +10891,68 @@
 }
 
 /* WALT sched implementation begins here */
+#ifdef CONFIG_SCHED_WALT
 
-#if defined(CONFIG_SCHED_WALT) && defined(CONFIG_CFS_BANDWIDTH)
-static inline struct task_group *next_task_group(struct task_group *tg)
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static void walt_init_cfs_rq_stats(struct cfs_rq *cfs_rq)
 {
-	tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
-
-	return (&tg->list == &task_groups) ? NULL : tg;
+	cfs_rq->walt_stats.nr_big_tasks = 0;
+	cfs_rq->walt_stats.cumulative_runnable_avg = 0;
+	cfs_rq->walt_stats.pred_demands_sum = 0;
 }
 
-/* Iterate over all cfs_rq in a cpu */
-#define for_each_cfs_rq(cfs_rq, tg, cpu)	\
-	for (tg = container_of(&task_groups, struct task_group, list);	\
-		((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
-
-void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
+static void walt_inc_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
 {
-	struct task_group *tg;
-	struct cfs_rq *cfs_rq;
-
-	rcu_read_lock();
-
-	for_each_cfs_rq(cfs_rq, tg, cpu)
-		reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
-
-	rcu_read_unlock();
+	inc_nr_big_task(&cfs_rq->walt_stats, p);
+	fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, p->ravg.demand,
+				      p->ravg.pred_demand);
 }
 
-static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra);
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
-	 struct task_struct *p, int change_cra);
-
-/* Add task's contribution to a cpu' HMP statistics */
-void inc_hmp_sched_stats_fair(struct rq *rq,
-			struct task_struct *p, int change_cra)
+static void walt_dec_cfs_rq_stats(struct cfs_rq *cfs_rq, struct task_struct *p)
 {
-	struct cfs_rq *cfs_rq;
-	struct sched_entity *se = &p->se;
+	dec_nr_big_task(&cfs_rq->walt_stats, p);
+	fixup_cumulative_runnable_avg(&cfs_rq->walt_stats, -(s64)p->ravg.demand,
+				      -(s64)p->ravg.pred_demand);
+}
+
+static void walt_inc_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+					    struct cfs_rq *tcfs_rq)
+{
+	struct rq *rq = rq_of(tcfs_rq);
+
+	stats->nr_big_tasks += tcfs_rq->walt_stats.nr_big_tasks;
+	fixup_cumulative_runnable_avg(stats,
+				tcfs_rq->walt_stats.cumulative_runnable_avg,
+				tcfs_rq->walt_stats.pred_demands_sum);
+
+	if (stats == &rq->walt_stats)
+		walt_fixup_cum_window_demand(rq,
+			tcfs_rq->walt_stats.cumulative_runnable_avg);
+
+}
+
+static void walt_dec_throttled_cfs_rq_stats(struct walt_sched_stats *stats,
+					    struct cfs_rq *tcfs_rq)
+{
+	struct rq *rq = rq_of(tcfs_rq);
+
+	stats->nr_big_tasks -= tcfs_rq->walt_stats.nr_big_tasks;
+	fixup_cumulative_runnable_avg(stats,
+				-tcfs_rq->walt_stats.cumulative_runnable_avg,
+				-tcfs_rq->walt_stats.pred_demands_sum);
 
 	/*
-	 * Although below check is not strictly required  (as
-	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
-	 * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
-	 * efficiency by short-circuiting for_each_sched_entity() loop when
-	 * sched_disable_window_stats
+	 * We remove the throttled cfs_rq's tasks' contribution from the
+	 * cumulative window demand so that the same can be added
+	 * unconditionally when the cfs_rq is unthrottled.
 	 */
-	if (sched_disable_window_stats)
-		return;
-
-	for_each_sched_entity(se) {
-		cfs_rq = cfs_rq_of(se);
-		inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
-		if (cfs_rq_throttled(cfs_rq))
-			break;
-	}
-
-	/* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
-	if (!se)
-		inc_rq_hmp_stats(rq, p, change_cra);
+	if (stats == &rq->walt_stats)
+		walt_fixup_cum_window_demand(rq,
+			-tcfs_rq->walt_stats.cumulative_runnable_avg);
 }
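
As a rough standalone model of the throttle/unthrottle bookkeeping above: the throttled cfs_rq's aggregated totals are folded out of, and later back into, the parent's stats as one lump sum. The struct and function names below are hypothetical stand-ins for walt_sched_stats and the two handlers, not kernel code.

    #include <assert.h>
    #include <stdio.h>

    struct toy_stats {
        int nr_big_tasks;
        long long cumulative_runnable_avg;
        long long pred_demands_sum;
    };

    /* Fold a group's totals out of (sign = -1) or back into (sign = +1)
     * the parent stats in a single step. */
    static void toy_fixup_throttled(struct toy_stats *parent,
                                    const struct toy_stats *child, int sign)
    {
        parent->nr_big_tasks            += sign * child->nr_big_tasks;
        parent->cumulative_runnable_avg += sign * child->cumulative_runnable_avg;
        parent->pred_demands_sum        += sign * child->pred_demands_sum;
        assert(parent->cumulative_runnable_avg >= 0);
    }

    int main(void)
    {
        struct toy_stats rq = { 4, 900, 700 }, grp = { 1, 300, 200 };

        toy_fixup_throttled(&rq, &grp, -1);     /* group throttled */
        toy_fixup_throttled(&rq, &grp, +1);     /* group unthrottled */
        printf("%d %lld %lld\n", rq.nr_big_tasks,
               rq.cumulative_runnable_avg, rq.pred_demands_sum);
        return 0;
    }
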
 
-static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
 				       u32 new_task_load, u32 new_pred_demand)
 {
 	struct cfs_rq *cfs_rq;
@@ -11047,47 +10963,91 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
-		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+		fixup_cumulative_runnable_avg(&cfs_rq->walt_stats,
 					      task_load_delta,
 					      pred_demand_delta);
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
 
-	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	/* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
 	if (!se) {
-		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+		fixup_cumulative_runnable_avg(&rq->walt_stats,
 					      task_load_delta,
 					      pred_demand_delta);
+		walt_fixup_cum_window_demand(rq, task_load_delta);
 	}
 }
 
-#elif defined(CONFIG_SCHED_WALT)
-
-inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
-
-static void
-fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
-			   u32 new_task_load, u32 new_pred_demand)
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+				    int delta, bool inc)
 {
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
 
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
-				      pred_demand_delta);
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
+		BUG_ON(cfs_rq->walt_stats.nr_big_tasks < 0);
+
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Fix up rq->walt_stats only if we didn't find any throttled cfs_rq */
+	if (!se)
+		walt_adjust_nr_big_tasks(rq, delta, inc);
 }
 
-static inline int task_will_be_throttled(struct task_struct *p)
+/*
+ * Check if task is part of a hierarchy where some cfs_rq does not have any
+ * runtime left.
+ *
+ * We can't rely on throttled_hierarchy() to do this test, as
+ * cfs_rq->throttle_count will not be updated yet when this function is called
+ * from scheduler_tick()
+ */
+static int task_will_be_throttled(struct task_struct *p)
 {
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return 0;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		if (!cfs_rq->runtime_enabled)
+			continue;
+		if (cfs_rq->runtime_remaining <= 0)
+			return 1;
+	}
+
 	return 0;
 }
 
-void inc_hmp_sched_stats_fair(struct rq *rq,
-			struct task_struct *p, int change_cra)
+#else /* CONFIG_CFS_BANDWIDTH */
+
+static void walt_fixup_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand)
 {
-	inc_nr_big_task(&rq->hmp_stats, p);
+	fixup_walt_sched_stats_common(rq, p, new_task_load, new_pred_demand);
 }
 
+static void walt_fixup_nr_big_tasks(struct rq *rq, struct task_struct *p,
+				    int delta, bool inc)
+{
+	walt_adjust_nr_big_tasks(rq, delta, inc);
+}
+
+static int task_will_be_throttled(struct task_struct *p)
+{
+	return false;
+}
+
+#endif /* CONFIG_CFS_BANDWIDTH */
+
 static inline int
 kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
 {
@@ -11108,17 +11068,6 @@
 	return rc;
 }
 
-#else
-
-static inline int task_will_be_throttled(struct task_struct *p)
-{
-	return 0;
-}
-
-#endif
-
-#if defined(CONFIG_SCHED_WALT)
-
 static DEFINE_RAW_SPINLOCK(migration_lock);
 void check_for_migration(struct rq *rq, struct task_struct *p)
 {
@@ -11131,6 +11080,9 @@
 		    rq->curr->nr_cpus_allowed == 1)
 			return;
 
+		if (task_will_be_throttled(p))
+			return;
+
 		raw_spin_lock(&migration_lock);
 		rcu_read_lock();
 		new_cpu = energy_aware_wake_cpu(p, cpu, 0);
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index b852cbe..5405d3f 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -78,14 +78,6 @@
 {
 }
 
-#ifdef CONFIG_SCHED_WALT
-static void
-fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
-			   u32 new_task_load, u32 new_pred_demand)
-{
-}
-#endif
-
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -114,7 +106,4 @@
 	.prio_changed		= prio_changed_idle,
 	.switched_to		= switched_to_idle,
 	.update_curr		= update_curr_idle,
-#ifdef CONFIG_SCHED_WALT
-	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_idle,
-#endif
 };
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2b556d0..6b935e7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -11,45 +11,6 @@
 #include <linux/irq_work.h>
 #include <trace/events/sched.h>
 
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
-{
-	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
-{
-	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
-			 u32 new_task_load, u32 new_pred_demand)
-{
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
-				      pred_demand_delta);
-}
-
-#ifdef CONFIG_SMP
-static int find_lowest_rq(struct task_struct *task);
-
-#endif /* CONFIG_SMP */
-#else  /* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
-
-#endif	/* CONFIG_SCHED_WALT */
-
 #include "walt.h"
 
 int sched_rr_timeslice = RR_TIMESLICE;
@@ -1421,7 +1382,7 @@
 		rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se, flags);
-	inc_hmp_sched_stats_rt(rq, p);
+	walt_inc_cumulative_runnable_avg(rq, p);
 
 	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
@@ -1433,7 +1394,7 @@
 
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se, flags);
-	dec_hmp_sched_stats_rt(rq, p);
+	walt_dec_cumulative_runnable_avg(rq, p);
 
 	dequeue_pushable_task(rq, p);
 }
@@ -1752,18 +1713,6 @@
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline unsigned long task_util(struct task_struct *p)
-{
-#ifdef CONFIG_SCHED_WALT
-	if (!walt_disabled && sysctl_sched_use_walt_task_util) {
-		u64 demand = p->ravg.demand;
-
-		return (demand << 10) / sched_ravg_window;
-	}
-#endif
-	return p->se.avg.util_avg;
-}
-
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
@@ -2623,7 +2572,7 @@
 
 	.update_curr		= update_curr_rt,
 #ifdef CONFIG_SCHED_WALT
-	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_rt,
+	.fixup_walt_sched_stats	= fixup_walt_sched_stats_common,
 #endif
 };
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 07d7731..14c62f4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -25,10 +25,12 @@
 struct rq;
 struct cpuidle_state;
 
+extern __read_mostly bool sched_predl;
+
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int sched_ravg_window;
 
-struct hmp_sched_stats {
+struct walt_sched_stats {
 	int nr_big_tasks;
 	u64 cumulative_runnable_avg;
 	u64 pred_demands_sum;
@@ -106,6 +108,12 @@
 static inline void cpu_load_update_active(struct rq *this_rq) { }
 #endif
 
+#ifdef CONFIG_SCHED_SMT
+extern void update_idle_core(struct rq *rq);
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
+
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
@@ -503,13 +511,10 @@
 	struct list_head leaf_cfs_rq_list;
 	struct task_group *tg;	/* group that "owns" this runqueue */
 
-#ifdef CONFIG_SCHED_WALT
-	u64 cumulative_runnable_avg;
-#endif
-
 #ifdef CONFIG_CFS_BANDWIDTH
+
 #ifdef CONFIG_SCHED_WALT
-	struct hmp_sched_stats hmp_stats;
+	struct walt_sched_stats walt_stats;
 #endif
 
 	int runtime_enabled;
@@ -768,13 +773,13 @@
 #ifdef CONFIG_SCHED_WALT
 	struct sched_cluster *cluster;
 	struct cpumask freq_domain_cpumask;
-	struct hmp_sched_stats hmp_stats;
+	struct walt_sched_stats walt_stats;
 
 	int cstate, wakeup_latency, wakeup_energy;
 	u64 window_start;
 	s64 cum_window_start;
 	u64 load_reported_window;
-	unsigned long hmp_flags;
+	unsigned long walt_flags;
 
 	u64 cur_irqload;
 	u64 avg_irqload;
@@ -859,23 +864,6 @@
 #endif
 }
 
-
-#ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
-extern void __update_idle_core(struct rq *rq);
-
-static inline void update_idle_core(struct rq *rq)
-{
-	if (static_branch_unlikely(&sched_smt_present))
-		__update_idle_core(rq);
-}
-
-#else
-static inline void update_idle_core(struct rq *rq) { }
-#endif
-
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
@@ -1424,7 +1412,7 @@
 	void (*task_change_group) (struct task_struct *p, int type);
 #endif
 #ifdef CONFIG_SCHED_WALT
-	void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
+	void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p,
 				      u32 new_task_load, u32 new_pred_demand);
 #endif
 };
@@ -1716,9 +1704,18 @@
 	return cpu_rq(cpu)->cpu_capacity_orig;
 }
 
-extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int walt_disabled;
 
+static inline unsigned long task_util(struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_task_util)
+		return p->ravg.demand /
+		       (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+#endif
+	return p->se.avg.util_avg;
+}
+
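
The WALT branch of task_util() above scales a task's demand (nanoseconds of work within a window) into the 0..1024 capacity range by dividing by sched_ravg_window >> SCHED_CAPACITY_SHIFT. A quick worked example with assumed numbers (a 20 ms window and 5 ms of demand); the values are illustrative only.

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT    10      /* capacity scale of 1024 */

    int main(void)
    {
        unsigned long long sched_ravg_window = 20000000ULL;     /* 20 ms in ns */
        unsigned long long demand = 5000000ULL;                 /*  5 ms in ns */

        /* demand / (window >> 10) is equivalent to demand * 1024 / window */
        unsigned long long util =
                demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT);

        printf("util = %llu (roughly a quarter of 1024)\n", util);
        return 0;
    }
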
 /*
  * cpu_util returns the amount of capacity of a CPU that is used by CFS
  * tasks. The unit of the return value must be the one of capacity so we can
@@ -1752,7 +1749,7 @@
 
 #ifdef CONFIG_SCHED_WALT
 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-		util = cpu_rq(cpu)->hmp_stats.cumulative_runnable_avg;
+		util = cpu_rq(cpu)->walt_stats.cumulative_runnable_avg;
 		util = div64_u64(util,
 				 sched_ravg_window >> SCHED_CAPACITY_SHIFT);
 	}
@@ -1816,13 +1813,18 @@
 		if (walt_load) {
 			u64 nl = cpu_rq(cpu)->nt_prev_runnable_sum +
 				rq->grp_time.nt_prev_runnable_sum;
+			u64 pl = rq->walt_stats.pred_demands_sum;
 
 			nl = div64_u64(nl, sched_ravg_window >>
 						SCHED_CAPACITY_SHIFT);
+			pl = div64_u64(pl, sched_ravg_window >>
+						SCHED_CAPACITY_SHIFT);
 
 			walt_load->prev_window_util = util;
 			walt_load->nl = nl;
-			walt_load->pl = 0;
+			walt_load->pl = pl;
+			rq->old_busy_time = util;
+			rq->old_estimated_time = pl;
 			walt_load->ws = rq->window_start;
 		}
 	}
@@ -2206,14 +2208,12 @@
 #ifdef CONFIG_SCHED_WALT
 u64 sched_ktime_clock(void);
 void note_task_waking(struct task_struct *p, u64 wallclock);
-extern void update_avg_burst(struct task_struct *p);
 #else /* CONFIG_SCHED_WALT */
 static inline u64 sched_ktime_clock(void)
 {
 	return 0;
 }
 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
-static inline void update_avg_burst(struct task_struct *p) { }
 #endif /* CONFIG_SCHED_WALT */
 
 #ifdef CONFIG_CPU_FREQ
@@ -2246,6 +2246,9 @@
 	struct update_util_data *data;
 
 #ifdef CONFIG_SCHED_WALT
+	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
+						SCHED_CPUFREQ_PL;
+
 	/*
 	 * Skip if we've already reported, but not if this is an inter-cluster
 	 * migration. Also only allow WALT update sites.
@@ -2254,7 +2257,7 @@
 		return;
 	if (!sched_disable_window_stats &&
 		(rq->load_reported_window == rq->window_start) &&
-		!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+		!(flags & exception_flags))
 		return;
 	rq->load_reported_window = rq->window_start;
 #endif
@@ -2348,13 +2351,11 @@
 extern unsigned int  __read_mostly sched_load_granule;
 
 extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
-extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern int update_preferred_cluster(struct related_thread_group *grp,
 			struct task_struct *p, u32 old_load);
 extern void set_preferred_cluster(struct related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);
 extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
-extern void update_avg(u64 *avg, u64 sample);
 
 #define NO_BOOST 0
 #define FULL_THROTTLE_BOOST 1
@@ -2482,6 +2483,11 @@
 	return cpu_max_possible_capacity(cpu) == max_possible_capacity;
 }
 
+static inline bool is_min_capacity_cpu(int cpu)
+{
+	return cpu_max_possible_capacity(cpu) == min_max_possible_capacity;
+}
+
 /*
  * 'load' is in reference to "best cpu" at its best frequency.
  * Scale that in reference to a given cpu, accounting for how bad it is
@@ -2536,6 +2542,11 @@
 	return p->ravg.demand;
 }
 
+static inline unsigned int task_pl(struct task_struct *p)
+{
+	return p->ravg.pred_demand;
+}
+
 #define pct_to_real(tunable)	\
 		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
 
@@ -2569,54 +2580,11 @@
 #define	BOOST_KICK	0
 #define	CPU_RESERVED	1
 
-static inline u64 cpu_cravg_sync(int cpu, int sync)
-{
-	struct rq *rq = cpu_rq(cpu);
-	u64 load;
-
-	load = rq->hmp_stats.cumulative_runnable_avg;
-
-	/*
-	 * If load is being checked in a sync wakeup environment,
-	 * we may want to discount the load of the currently running
-	 * task.
-	 */
-	if (sync && cpu == smp_processor_id()) {
-		if (load > rq->curr->ravg.demand)
-			load -= rq->curr->ravg.demand;
-		else
-			load = 0;
-	}
-
-	return load;
-}
-
-extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
-extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
 extern int sched_boost(void);
-extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
-					enum sched_boost_policy boost_policy);
-extern int task_will_fit(struct task_struct *p, int cpu);
-extern u64 cpu_load(int cpu);
-extern u64 cpu_load_sync(int cpu, int sync);
 extern int preferred_cluster(struct sched_cluster *cluster,
 						struct task_struct *p);
-extern void inc_rq_hmp_stats(struct rq *rq,
-				struct task_struct *p, int change_cra);
-extern void dec_rq_hmp_stats(struct rq *rq,
-				struct task_struct *p, int change_cra);
-extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
-extern int upmigrate_discouraged(struct task_struct *p);
 extern struct sched_cluster *rq_cluster(struct rq *rq);
-extern int nr_big_tasks(struct rq *rq);
 extern void reset_task_stats(struct task_struct *p);
-extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
-extern void inc_hmp_sched_stats_fair(struct rq *rq,
-			struct task_struct *p, int change_cra);
-extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
-					struct cftype *cft);
-extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
-				struct cftype *cft, u64 upmigrate_discourage);
 extern void clear_top_tasks_bitmap(unsigned long *bitmap);
 
 #if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
@@ -2652,53 +2620,42 @@
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	return test_bit(CPU_RESERVED, &rq->hmp_flags);
+	return test_bit(CPU_RESERVED, &rq->walt_flags);
 }
 
 static inline int mark_reserved(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+	return test_and_set_bit(CPU_RESERVED, &rq->walt_flags);
 }
 
 static inline void clear_reserved(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	clear_bit(CPU_RESERVED, &rq->hmp_flags);
-}
-
-static inline bool
-__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
-	return (p->on_rq || p->last_sleep_ts >= rq->window_start);
+	clear_bit(CPU_RESERVED, &rq->walt_flags);
 }
 
 static inline bool
 task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
 {
-	return cpu_of(rq) == task_cpu(p) && __task_in_cum_window_demand(rq, p);
+	return cpu_of(rq) == task_cpu(p) && (p->on_rq || p->last_sleep_ts >=
+							 rq->window_start);
 }
 
-static inline void
-dec_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
-	rq->cum_window_demand -= p->ravg.demand;
-	WARN_ON_ONCE(rq->cum_window_demand < 0);
-}
-
-static inline void
-inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta)
+static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta)
 {
 	rq->cum_window_demand += delta;
+	if (unlikely((s64)rq->cum_window_demand < 0))
+		rq->cum_window_demand = 0;
 }
 
 extern void update_cpu_cluster_capacity(const cpumask_t *cpus);
 
 extern unsigned long thermal_cap(int cpu);
 
-extern void clear_hmp_request(int cpu);
+extern void clear_walt_request(int cpu);
 
 extern int got_boost_kick(void);
 extern void clear_boost_kick(int cpu);
@@ -2714,7 +2671,7 @@
 
 #else	/* CONFIG_SCHED_WALT */
 
-struct hmp_sched_stats;
+struct walt_sched_stats;
 struct related_thread_group;
 struct sched_cluster;
 
@@ -2725,43 +2682,14 @@
 
 static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
 
-static inline int task_will_fit(struct task_struct *p, int cpu)
-{
-	return 1;
-}
-
 static inline int sched_boost(void)
 {
 	return 0;
 }
 
-static inline int is_big_task(struct task_struct *p)
-{
-	return 0;
-}
-
-static inline int nr_big_tasks(struct rq *rq)
-{
-	return 0;
-}
-
-static inline int is_cpu_throttling_imminent(int cpu)
-{
-	return 0;
-}
-
-static inline int is_task_migration_throttled(struct task_struct *p)
-{
-	return 0;
-}
-
+static inline bool hmp_capable(void) { return false; }
 static inline bool is_max_capacity_cpu(int cpu) { return true; }
-
-static inline void
-inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
-
-static inline void
-dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+static inline bool is_min_capacity_cpu(int cpu) { return true; }
 
 static inline int
 preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
@@ -2798,6 +2726,7 @@
 }
 
 static inline u32 task_load(struct task_struct *p) { return 0; }
+static inline u32 task_pl(struct task_struct *p) { return 0; }
 
 static inline int update_preferred_cluster(struct related_thread_group *grp,
 			 struct task_struct *p, u32 old_load)
@@ -2822,17 +2751,7 @@
 #define trace_sched_cpu_load_cgroup(...)
 #define trace_sched_cpu_load_wakeup(...)
 
-static inline bool
-__task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
-{
-	return 0;
-}
-
-static inline void
-dec_cum_window_demand(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-inc_cum_window_demand(struct rq *rq, struct task_struct *p, s64 delta) { }
+static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 delta) { }
 
 static inline void update_cpu_cluster_capacity(const cpumask_t *cpus) { }
 
@@ -2843,7 +2762,7 @@
 }
 #endif
 
-static inline void clear_hmp_request(int cpu) { }
+static inline void clear_walt_request(int cpu) { }
 
 static inline int got_boost_kick(void)
 {
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 4238924..166c643 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -33,6 +33,8 @@
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
+static DEFINE_PER_CPU(atomic64_t, last_busy_time) = ATOMIC64_INIT(0);
+
 #define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
 /**
  * sched_get_nr_running_avg
@@ -120,6 +122,27 @@
 }
 EXPORT_SYMBOL(sched_get_nr_running_avg);
 
+#define BUSY_NR_RUN		3
+#define BUSY_LOAD_FACTOR	2
+static inline void update_last_busy_time(int cpu, bool dequeue,
+				unsigned long prev_nr_run, u64 curr_time)
+{
+	bool nr_run_trigger = false, load_trigger = false;
+
+	if (!hmp_capable() || is_min_capacity_cpu(cpu))
+		return;
+
+	if (prev_nr_run >= BUSY_NR_RUN && per_cpu(nr, cpu) < BUSY_NR_RUN)
+		nr_run_trigger = true;
+
+	if (dequeue && (cpu_util(cpu) * BUSY_LOAD_FACTOR) >
+			capacity_orig_of(cpu))
+		load_trigger = true;
+
+	if (nr_run_trigger || load_trigger)
+		atomic64_set(&per_cpu(last_busy_time, cpu), curr_time);
+}
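
update_last_busy_time() above stamps a CPU as recently busy in two cases: its runnable count drops below BUSY_NR_RUN after having been at least BUSY_NR_RUN, or a dequeue happens while utilization exceeds half the CPU's original capacity (util * BUSY_LOAD_FACTOR > capacity). A small self-contained restatement of just those two triggers; the hmp_capable()/minimum-capacity filtering is left out.

    #include <stdbool.h>
    #include <stdio.h>

    #define BUSY_NR_RUN         3
    #define BUSY_LOAD_FACTOR    2

    static bool busy_trigger(unsigned long prev_nr, unsigned long cur_nr,
                             bool dequeue, unsigned long util,
                             unsigned long capacity)
    {
        bool nr_run_trigger = prev_nr >= BUSY_NR_RUN && cur_nr < BUSY_NR_RUN;
        bool load_trigger = dequeue && util * BUSY_LOAD_FACTOR > capacity;

        return nr_run_trigger || load_trigger;
    }

    int main(void)
    {
        /* Dequeue taking a CPU from 3 runnable tasks to 2 at 60% utilization. */
        printf("%d\n", busy_trigger(3, 2, true, 614, 1024));    /* prints 1 */
        return 0;
    }
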
+
 /**
  * sched_update_nr_prod
  * @cpu: The core id of the nr running driver.
@@ -148,6 +171,8 @@
 	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
 		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
 
+	update_last_busy_time(cpu, !inc, nr_running, curr_time);
+
 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
 	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
 	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
@@ -184,3 +209,8 @@
 	busy = (util * 100) / capacity;
 	return busy;
 }
+
+u64 sched_get_cpu_last_busy_time(int cpu)
+{
+	return atomic64_read(&per_cpu(last_busy_time, cpu));
+}
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index dcc4a36..11a1888 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -18,41 +18,6 @@
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SCHED_WALT
-
-static void
-inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
-{
-	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
-{
-	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
-}
-
-static void
-fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
-			   u32 new_task_load, u32 new_pred_demand)
-{
-	s64 task_load_delta = (s64)new_task_load - task_load(p);
-	s64 pred_demand_delta = PRED_DEMAND_DELTA;
-
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
-				      pred_demand_delta);
-}
-
-#else	/* CONFIG_SCHED_WALT */
-
-static inline void
-inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
-
-static inline void
-dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
-
-#endif	/* CONFIG_SCHED_WALT */
-
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -78,14 +43,14 @@
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	add_nr_running(rq, 1);
-	inc_hmp_sched_stats_stop(rq, p);
+	walt_inc_cumulative_runnable_avg(rq, p);
 }
 
 static void
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	sub_nr_running(rq, 1);
-	dec_hmp_sched_stats_stop(rq, p);
+	walt_dec_cumulative_runnable_avg(rq, p);
 }
 
 static void yield_task_stop(struct rq *rq)
@@ -173,6 +138,6 @@
 	.switched_to		= switched_to_stop,
 	.update_curr		= update_curr_stop,
 #ifdef CONFIG_SCHED_WALT
-	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_stop,
+	.fixup_walt_sched_stats	= fixup_walt_sched_stats_common,
 #endif
 };
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 6f7d34d..48f3512 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -116,8 +116,6 @@
  * IMPORTANT: Initialize both copies to same value!!
  */
 
-static __read_mostly bool sched_predl;
-
 __read_mostly unsigned int sched_ravg_hist_size = 5;
 __read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
 
@@ -207,27 +205,28 @@
 }
 early_param("sched_predl", set_sched_predl);
 
-void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
 {
-	inc_nr_big_task(&rq->hmp_stats, p);
-	if (change_cra)
-		inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+	inc_nr_big_task(&rq->walt_stats, p);
+	walt_inc_cumulative_runnable_avg(rq, p);
 }
 
-void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+void dec_rq_walt_stats(struct rq *rq, struct task_struct *p)
 {
-	dec_nr_big_task(&rq->hmp_stats, p);
-	if (change_cra)
-		dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+	dec_nr_big_task(&rq->walt_stats, p);
+	walt_dec_cumulative_runnable_avg(rq, p);
 }
 
-void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+				   u32 new_task_load, u32 new_pred_demand)
 {
-	stats->nr_big_tasks = 0; /* never happens on EAS */
-	if (reset_cra) {
-		stats->cumulative_runnable_avg = 0;
-		stats->pred_demands_sum = 0;
-	}
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->walt_stats, task_load_delta,
+				      pred_demand_delta);
+
+	walt_fixup_cum_window_demand(rq, task_load_delta);
 }
 
 /*
@@ -295,9 +294,7 @@
 	nr_windows = div64_u64(delta, sched_ravg_window);
 	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
 
-	rq->cum_window_demand = rq->hmp_stats.cumulative_runnable_avg;
-	if (event == PUT_PREV_TASK)
-		rq->cum_window_demand += rq->curr->ravg.demand;
+	rq->cum_window_demand = rq->walt_stats.cumulative_runnable_avg;
 
 	return old_window_start;
 }
@@ -379,12 +376,12 @@
 	struct rq *rq = cpu_rq(cpu);
 
 	if (!is_max_capacity_cpu(cpu))
-		return rq->hmp_stats.nr_big_tasks;
+		return rq->walt_stats.nr_big_tasks;
 
 	return rq->nr_running;
 }
 
-void clear_hmp_request(int cpu)
+void clear_walt_request(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -622,20 +619,6 @@
 	raw_spin_unlock(&cluster->load_lock);
 }
 
-static inline void
-init_new_task_load_hmp(struct task_struct *p, bool idle_task)
-{
-}
-
-static inline void
-update_task_burst(struct task_struct *p, struct rq *rq, int event, int runtime)
-{
-}
-
-static void reset_task_stats_hmp(struct task_struct *p)
-{
-}
-
 static inline void inter_cluster_migration_fixup
 	(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
 {
@@ -788,9 +771,15 @@
 
 	update_task_cpu_cycles(p, new_cpu);
 
-	if (__task_in_cum_window_demand(src_rq, p)) {
-		dec_cum_window_demand(src_rq, p);
-		inc_cum_window_demand(dest_rq, p, p->ravg.demand);
+	/*
+	 * When a task is migrating during wakeup, adjust the
+	 * task's contribution towards the cumulative window
+	 * demand.
+	 */
+	if (p->state == TASK_WAKING && p->last_sleep_ts >=
+				       src_rq->window_start) {
+		walt_fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
+		walt_fixup_cum_window_demand(dest_rq, p->ravg.demand);
 	}
 
 	new_task = is_new_task(p);
@@ -1071,7 +1060,7 @@
 
 	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
 				!p->dl.dl_throttled))
-		p->sched_class->fixup_hmp_sched_stats(rq, p,
+		p->sched_class->fixup_walt_sched_stats(rq, p,
 				p->ravg.demand,
 				new);
 
@@ -1288,6 +1277,33 @@
 	return delta;
 }
 
+/* Convert busy time to its frequency equivalent.
+ * Assumes the load is scaled to 1024.
+ */
+static inline unsigned int load_to_freq(struct rq *rq, u64 load)
+{
+	return mult_frac(cpu_max_possible_freq(cpu_of(rq)), load,
+			 capacity_orig_of(cpu_of(rq)));
+}
+
+bool do_pl_notif(struct rq *rq)
+{
+	u64 prev = rq->old_busy_time;
+	u64 pl = rq->walt_stats.pred_demands_sum;
+	int cpu = cpu_of(rq);
+
+	/* If already at max freq, bail out */
+	if (capacity_orig_of(cpu) == capacity_curr_of(cpu))
+		return false;
+
+	prev = max(prev, rq->old_estimated_time);
+
+	pl = div64_u64(pl, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
+
+	/* 400 MHz filter. */
+	return (pl > prev) && (load_to_freq(rq, pl - prev) > 400000);
+}
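
do_pl_notif() above bails out when the CPU already runs at its maximum capacity and otherwise reports a change only when the predicted load exceeds the previously reported busy time by more than a 400 MHz equivalent, with load_to_freq() mapping a 0..capacity load onto the CPU's maximum frequency. A worked example under assumed numbers (2.0 GHz maximum frequency, capacity 1024); the plain division stands in for mult_frac() and the single prev value for max(old_busy_time, old_estimated_time).

    #include <stdio.h>

    static unsigned long long load_to_freq_model(unsigned long long max_freq_khz,
                                                 unsigned long long load,
                                                 unsigned long long capacity)
    {
        return max_freq_khz * load / capacity;
    }

    int main(void)
    {
        unsigned long long max_freq = 2000000;  /* kHz */
        unsigned long long prev = 300;          /* previously reported load */
        unsigned long long pl = 550;            /* predicted load, same scale */

        int notify = pl > prev &&
                     load_to_freq_model(max_freq, pl - prev, 1024) > 400000;

        printf("delta=%llu kHz notify=%d\n",
               load_to_freq_model(max_freq, pl - prev, 1024), notify);
        return 0;
    }

With these numbers the 250-unit increase maps to roughly 488 MHz, which clears the 400 MHz filter, so the notification would fire.
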
+
 static void rollover_cpu_window(struct rq *rq, bool full_window)
 {
 	u64 curr_sum = rq->curr_runnable_sum;
@@ -1666,21 +1682,28 @@
 
 	/*
 	 * A throttled deadline sched class task gets dequeued without
-	 * changing p->on_rq. Since the dequeue decrements hmp stats
+	 * changing p->on_rq. Since the dequeue decrements walt stats
 	 * avoid decrementing it here again.
+	 *
+	 * When the window is rolled over, the cumulative window demand
+	 * is reset to the cumulative runnable average (the contribution
+	 * from the tasks on the runqueue). If the current task has already
+	 * been dequeued, its demand is not included in the cumulative
+	 * runnable average, so add the task's demand separately to the
+	 * cumulative window demand.
 	 */
-	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
-						!p->dl.dl_throttled))
-		p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
-						      pred_demand);
+	if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
+		if (task_on_rq_queued(p))
+			p->sched_class->fixup_walt_sched_stats(rq, p, demand,
+							       pred_demand);
+		else if (rq->curr == p)
+			walt_fixup_cum_window_demand(rq, demand);
+	}
 
 	p->ravg.demand = demand;
 	p->ravg.coloc_demand = div64_u64(sum, sched_ravg_hist_size);
 	p->ravg.pred_demand = pred_demand;
 
-	if (__task_in_cum_window_demand(rq, p))
-		inc_cum_window_demand(rq, p, p->ravg.demand - prev_demand);
-
 done:
 	trace_sched_update_history(rq, p, runtime, samples, event);
 }
@@ -1879,7 +1902,7 @@
 void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 						u64 wallclock, u64 irqtime)
 {
-	u64 runtime, old_window_start;
+	u64 old_window_start;
 
 	if (!rq->window_start || sched_disable_window_stats ||
 	    p->ravg.mark_start == wallclock)
@@ -1895,9 +1918,7 @@
 	}
 
 	update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
-	runtime = update_task_demand(p, rq, event, wallclock);
-	if (runtime)
-		update_task_burst(p, rq, event, runtime);
+	update_task_demand(p, rq, event, wallclock);
 	update_cpu_busy_time(p, rq, event, wallclock, irqtime);
 	update_task_pred_demand(rq, p, event);
 done:
@@ -1938,8 +1959,6 @@
 	memset(&p->ravg, 0, sizeof(struct ravg));
 	p->cpu_cycles = 0;
 
-	init_new_task_load_hmp(p, idle_task);
-
 	p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
 	p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
 
@@ -1995,8 +2014,6 @@
 	p->ravg.curr_window_cpu = curr_window_ptr;
 	p->ravg.prev_window_cpu = prev_window_ptr;
 
-	reset_task_stats_hmp(p);
-
 	/* Retain EXITING_TASK marker */
 	p->ravg.sum_history[0] = sum;
 }
@@ -3069,11 +3086,11 @@
 	cpumask_set_cpu(cpu_of(rq), &rq->freq_domain_cpumask);
 	init_irq_work(&walt_migration_irq_work, walt_irq_work);
 	init_irq_work(&walt_cpufreq_irq_work, walt_irq_work);
-	rq->hmp_stats.cumulative_runnable_avg = 0;
+	rq->walt_stats.cumulative_runnable_avg = 0;
 	rq->window_start = 0;
 	rq->cum_window_start = 0;
-	rq->hmp_stats.nr_big_tasks = 0;
-	rq->hmp_flags = 0;
+	rq->walt_stats.nr_big_tasks = 0;
+	rq->walt_flags = 0;
 	rq->cur_irqload = 0;
 	rq->avg_irqload = 0;
 	rq->irqload_ts = 0;
@@ -3096,7 +3113,7 @@
 	rq->old_busy_time = 0;
 	rq->old_estimated_time = 0;
 	rq->old_busy_time_group = 0;
-	rq->hmp_stats.pred_demands_sum = 0;
+	rq->walt_stats.pred_demands_sum = 0;
 	rq->ed_task = NULL;
 	rq->curr_table = 0;
 	rq->prev_top = 0;
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 3f4739e..535f14b 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -66,7 +66,7 @@
 extern unsigned int nr_eligible_big_tasks(int cpu);
 
 static inline void
-inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+inc_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
 {
 	if (sched_disable_window_stats)
 		return;
@@ -76,7 +76,7 @@
 }
 
 static inline void
-dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+dec_nr_big_task(struct walt_sched_stats *stats, struct task_struct *p)
 {
 	if (sched_disable_window_stats)
 		return;
@@ -88,57 +88,20 @@
 }
 
 static inline void
-adjust_nr_big_tasks(struct hmp_sched_stats *stats, int delta, bool inc)
+walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
 {
-	struct rq *rq = container_of(stats, struct rq, hmp_stats);
-
 	if (sched_disable_window_stats)
 		return;
 
 	sched_update_nr_prod(cpu_of(rq), 0, true);
-	stats->nr_big_tasks += inc ? delta : -delta;
+	rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
 
-	BUG_ON(stats->nr_big_tasks < 0);
+	BUG_ON(rq->walt_stats.nr_big_tasks < 0);
 }
 
 static inline void
-inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-				 struct task_struct *p)
-{
-	u32 task_load;
-
-	if (sched_disable_window_stats)
-		return;
-
-	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
-	stats->cumulative_runnable_avg += task_load;
-	stats->pred_demands_sum += p->ravg.pred_demand;
-}
-
-static inline void
-dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-				struct task_struct *p)
-{
-	u32 task_load;
-
-	if (sched_disable_window_stats)
-		return;
-
-	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
-
-	stats->cumulative_runnable_avg -= task_load;
-
-	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
-
-	stats->pred_demands_sum -= p->ravg.pred_demand;
-	BUG_ON((s64)stats->pred_demands_sum < 0);
-}
-
-static inline void
-fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-			      struct task_struct *p, s64 task_load_delta,
-			      s64 pred_demand_delta)
+fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
+			      s64 task_load_delta, s64 pred_demand_delta)
 {
 	if (sched_disable_window_stats)
 		return;
@@ -150,17 +113,56 @@
 	BUG_ON((s64)stats->pred_demands_sum < 0);
 }
 
-extern void inc_rq_hmp_stats(struct rq *rq,
-				struct task_struct *p, int change_cra);
-extern void dec_rq_hmp_stats(struct rq *rq,
-				struct task_struct *p, int change_cra);
-extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
+static inline void
+walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand,
+				      p->ravg.pred_demand);
+
+	/*
+	 * Add a task's contribution to the cumulative window demand when
+	 *
+	 * (1) task is enqueued with on_rq = 1, i.e. migration,
+	 *     prio/cgroup/class change.
+	 * (2) task is waking for the first time in this window.
+	 */
+	if (p->on_rq || (p->last_sleep_ts < rq->window_start))
+		walt_fixup_cum_window_demand(rq, p->ravg.demand);
+}
+
+static inline void
+walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	fixup_cumulative_runnable_avg(&rq->walt_stats, -(s64)p->ravg.demand,
+				      -(s64)p->ravg.pred_demand);
+
+	/*
+	 * on_rq will be 1 for sleeping tasks. So check if the task
+	 * is migrating or dequeuing in RUNNING state to change the
+	 * prio/cgroup/class.
+	 */
+	if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
+		walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
+}
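
The two helpers above apply deliberately asymmetric rules to the cumulative window demand: enqueue adds the task's demand when the task is enqueued while already on the runqueue (migration, prio/cgroup/class change) or on its first wakeup in the window, while dequeue subtracts it only when the task is migrating away or is dequeued while still in the RUNNING state, so a task that simply goes to sleep keeps its contribution for the rest of the window. A toy model of those rules; the field names are hypothetical stand-ins for p->on_rq, p->state, p->last_sleep_ts and rq->window_start.

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_task {
        bool on_rq;                     /* enqueued while already queued */
        bool running;                   /* dequeued while TASK_RUNNING */
        bool migrating;                 /* task_on_rq_migrating() */
        unsigned long long last_sleep_ts;
        unsigned long long demand;
    };

    static void toy_enqueue(const struct toy_task *p,
                            unsigned long long window_start, long long *cwd)
    {
        /* Count the task on migration in, or on its first wakeup this window. */
        if (p->on_rq || p->last_sleep_ts < window_start)
            *cwd += p->demand;
    }

    static void toy_dequeue(const struct toy_task *p, long long *cwd)
    {
        /* A task merely going to sleep keeps its contribution this window. */
        if (p->migrating || p->running)
            *cwd -= p->demand;
    }

    int main(void)
    {
        long long cwd = 0;
        struct toy_task p = { .last_sleep_ts = 0, .demand = 100 };

        toy_enqueue(&p, 50, &cwd);      /* first wakeup in window: +100 */
        toy_dequeue(&p, &cwd);          /* goes to sleep: contribution stays */
        printf("cwd=%lld\n", cwd);      /* prints 100 */
        return 0;
    }
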
+
+extern void fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+					  u32 new_task_load,
+					  u32 new_pred_demand);
+extern void inc_rq_walt_stats(struct rq *rq, struct task_struct *p);
+extern void dec_rq_walt_stats(struct rq *rq, struct task_struct *p);
 extern void fixup_busy_time(struct task_struct *p, int new_cpu);
 extern void init_new_task_load(struct task_struct *p, bool idle_task);
 extern void mark_task_starting(struct task_struct *p);
 extern void set_window_start(struct rq *rq);
 void account_irqtime(int cpu, struct task_struct *curr, u64 delta,
                                   u64 wallclock);
+extern bool do_pl_notif(struct rq *rq);
 
 #define SCHED_HIGH_IRQ_TIMEOUT 3
 static inline u64 sched_irqload(int cpu)
@@ -297,8 +299,8 @@
 
 static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
 				int event, u64 wallclock, u64 irqtime) { }
-static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-		 struct task_struct *p)
+static inline void walt_inc_cumulative_runnable_avg(struct rq *rq,
+		struct task_struct *p)
 {
 }
 
@@ -307,21 +309,21 @@
 	return 0;
 }
 
-static inline void adjust_nr_big_tasks(struct hmp_sched_stats *stats,
+static inline void walt_adjust_nr_big_tasks(struct rq *rq,
 		int delta, bool inc)
 {
 }
 
-static inline void inc_nr_big_task(struct hmp_sched_stats *stats,
+static inline void inc_nr_big_task(struct walt_sched_stats *stats,
 		struct task_struct *p)
 {
 }
 
-static inline void dec_nr_big_task(struct hmp_sched_stats *stats,
+static inline void dec_nr_big_task(struct walt_sched_stats *stats,
 		struct task_struct *p)
 {
 }
-static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+static inline void walt_dec_cumulative_runnable_avg(struct rq *rq,
 		 struct task_struct *p)
 {
 }
@@ -348,6 +350,19 @@
 }
 
 static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+static inline bool do_pl_notif(struct rq *rq) { return false; }
+
+static inline void
+inc_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_rq_walt_stats(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+fixup_walt_sched_stats_common(struct rq *rq, struct task_struct *p,
+			      u32 new_task_load, u32 new_pred_demand)
+{
+}
 
 #endif /* CONFIG_SCHED_WALT */
 
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 5dfdf39..842928a 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -845,7 +845,8 @@
 	 * Rate limit to the tick as a hot fix to prevent DOS. Will be
 	 * mopped up later.
 	 */
-	if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
+	if (timr->it.alarm.interval.tv64 &&
+			ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
 		timr->it.alarm.interval = ktime_set(0, TICK_NSEC);
 
 	exp = timespec_to_ktime(new_setting->it_value);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 221eb59..4f7ea84 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3590,7 +3590,7 @@
 	int exclude_mod = 0;
 	int found = 0;
 	int ret;
-	int clear_filter;
+	int clear_filter = 0;
 
 	if (func) {
 		func_g.type = filter_parse_regex(func, len, &func_g.search,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ebf9498..4a848f7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1928,7 +1928,7 @@
 #endif
 		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
-		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
 		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
@@ -7257,6 +7257,7 @@
 	}
 	kfree(tr->topts);
 
+	free_cpumask_var(tr->tracing_cpumask);
 	kfree(tr->name);
 	kfree(tr);
 
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 562fa69..997ac0b 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -259,7 +259,8 @@
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
 	struct trace_event_call *tp_event = p_event->tp_event;
-	hlist_del_rcu(&p_event->hlist_entry);
+	if (!hlist_unhashed(&p_event->hlist_entry))
+		hlist_del_rcu(&p_event->hlist_entry);
 	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
 }
 
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 81dedaa..4731a08 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+	int next;
+
+again:
+	next = cpumask_next(n, mask);
+
+	if (wrap && n < start && next >= start) {
+		return nr_cpumask_bits;
+
+	} else if (next >= nr_cpumask_bits) {
+		wrap = true;
+		n = -1;
+		goto again;
+	}
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
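
cpumask_next_wrap() is the helper behind the three-argument for_each_cpu_wrap() iterator now used in the scheduler hunks above (for example in select_idle_core() and select_idle_cpu()): scan forward from a starting CPU, wrap past the end of the mask once, and stop before crossing the start again. A userspace sketch of the same wrap-around scan over a plain array, mirroring the semantics rather than the kernel implementation.

    #include <stdio.h>

    #define NCPUS 8

    /* Return the next set index after n, wrapping once past the end;
     * return NCPUS once the scan would cross start again. */
    static int next_wrap(int n, const int mask[NCPUS], int start, int *wrapped)
    {
        int next;

    again:
        for (next = n + 1; next < NCPUS; next++)
            if (mask[next])
                break;

        if (*wrapped) {
            if (next >= start)
                return NCPUS;
        } else if (next >= NCPUS) {
            *wrapped = 1;
            n = -1;
            goto again;
        }
        return next;
    }

    int main(void)
    {
        int mask[NCPUS] = { 1, 0, 1, 1, 0, 1, 0, 1 };
        int wrapped = 0, cpu;

        /* Walk the set CPUs starting at CPU 3: prints 3 5 7 0 2 */
        for (cpu = next_wrap(2, mask, 3, &wrapped); cpu < NCPUS;
             cpu = next_wrap(cpu, mask, 3, &wrapped))
            printf("%d ", cpu);
        printf("\n");
        return 0;
    }
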
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e7d5db9..8258e9e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1373,8 +1373,8 @@
 		get_page(page);
 		spin_unlock(ptl);
 		split_huge_page(page);
-		put_page(page);
 		unlock_page(page);
+		put_page(page);
 		goto out_unlocked;
 	}
 
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 234676e..7a40fa2 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@
 		l = list_lru_from_kmem(nlru, item);
 		list_add_tail(item, &l->list);
 		l->nr_items++;
+		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -136,6 +137,7 @@
 		l = list_lru_from_kmem(nlru, item);
 		list_del_init(item);
 		l->nr_items--;
+		nlru->nr_items--;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -183,15 +185,10 @@
 
 unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 {
-	long count = 0;
-	int memcg_idx;
+	struct list_lru_node *nlru;
 
-	count += __list_lru_count_one(lru, nid, -1);
-	if (list_lru_memcg_aware(lru)) {
-		for_each_memcg_cache_index(memcg_idx)
-			count += __list_lru_count_one(lru, nid, memcg_idx);
-	}
-	return count;
+	nlru = &lru->node[nid];
+	return nlru->nr_items;
 }
 EXPORT_SYMBOL_GPL(list_lru_count_node);
 
@@ -226,6 +223,7 @@
 			assert_spin_locked(&nlru->lock);
 		case LRU_REMOVED:
 			isolated++;
+			nlru->nr_items--;
 			/*
 			 * If the lru lock has been dropped, our list
 			 * traversal is now invalid and so we have to
diff --git a/mm/mmap.c b/mm/mmap.c
index b8f91e0e..6f90f07 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2237,7 +2237,7 @@
 
 	/* Guard against exceeding limits of the address space. */
 	address &= PAGE_MASK;
-	if (address >= TASK_SIZE)
+	if (address >= (TASK_SIZE & PAGE_MASK))
 		return -ENOMEM;
 	address += PAGE_SIZE;
 
diff --git a/mm/page_owner.c b/mm/page_owner.c
index d2db436..65e24fb 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -285,7 +285,11 @@
 				continue;
 
 			if (PageBuddy(page)) {
-				pfn += (1UL << page_order(page)) - 1;
+				unsigned long freepage_order;
+
+				freepage_order = page_order_unsafe(page);
+				if (freepage_order < MAX_ORDER)
+					pfn += (1UL << freepage_order) - 1;
 				continue;
 			}
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ad182e6..ed89128 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -426,7 +426,7 @@
 	BUG_ON(offset_in_page(size));
 	BUG_ON(!is_power_of_2(align));
 
-	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
+	might_sleep();
 
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
@@ -662,6 +662,13 @@
 
 static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 
+/*
+ * Serialize vmap purging.  There is no actual critical section protected
+ * by this lock, but we want to avoid concurrent calls for performance
+ * reasons and to make the pcpu_get_vm_areas more deterministic.
+ */
+static DEFINE_MUTEX(vmap_purge_lock);
+
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
@@ -676,59 +683,40 @@
 
 /*
  * Purges all lazily-freed vmap areas.
- *
- * If sync is 0 then don't purge if there is already a purge in progress.
- * If force_flush is 1, then flush kernel TLBs between *start and *end even
- * if we found no lazy vmap areas to unmap (callers can use this to optimise
- * their own TLB flushing).
- * Returns with *start = min(*start, lowest purged address)
- *              *end = max(*end, highest purged address)
  */
-static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
-					int sync, int force_flush)
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 {
-	static DEFINE_SPINLOCK(purge_lock);
 	struct llist_node *valist;
 	struct vmap_area *va;
 	struct vmap_area *n_va;
-	int nr = 0;
+	bool do_free = false;
 
-	/*
-	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
-	 * should not expect such behaviour. This just simplifies locking for
-	 * the case that isn't actually used at the moment anyway.
-	 */
-	if (!sync && !force_flush) {
-		if (!spin_trylock(&purge_lock))
-			return;
-	} else
-		spin_lock(&purge_lock);
-
-	if (sync)
-		purge_fragmented_blocks_allcpus();
+	lockdep_assert_held(&vmap_purge_lock);
 
 	valist = llist_del_all(&vmap_purge_list);
 	llist_for_each_entry(va, valist, purge_list) {
-		if (va->va_start < *start)
-			*start = va->va_start;
-		if (va->va_end > *end)
-			*end = va->va_end;
-		nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
+		if (va->va_start < start)
+			start = va->va_start;
+		if (va->va_end > end)
+			end = va->va_end;
+		do_free = true;
 	}
 
-	if (nr)
+	if (!do_free)
+		return false;
+
+	flush_tlb_kernel_range(start, end);
+
+	spin_lock(&vmap_area_lock);
+	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+
+		__free_vmap_area(va);
 		atomic_sub(nr, &vmap_lazy_nr);
-
-	if (nr || force_flush)
-		flush_tlb_kernel_range(*start, *end);
-
-	if (nr) {
-		spin_lock(&vmap_area_lock);
-		llist_for_each_entry_safe(va, n_va, valist, purge_list)
-			__free_vmap_area(va);
-		spin_unlock(&vmap_area_lock);
+		cond_resched_lock(&vmap_area_lock);
 	}
-	spin_unlock(&purge_lock);
+	spin_unlock(&vmap_area_lock);
+	return true;
 }
 
 /*
@@ -737,9 +725,10 @@
  */
 static void try_purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	if (mutex_trylock(&vmap_purge_lock)) {
+		__purge_vmap_area_lazy(ULONG_MAX, 0);
+		mutex_unlock(&vmap_purge_lock);
+	}
 }
 
 /*
@@ -747,9 +736,10 @@
  */
 static void purge_vmap_area_lazy(void)
 {
-	unsigned long start = ULONG_MAX, end = 0;
-
-	__purge_vmap_area_lazy(&start, &end, 1, 0);
+	mutex_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	__purge_vmap_area_lazy(ULONG_MAX, 0);
+	mutex_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -772,22 +762,13 @@
 }
 
 /*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
- */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
-{
-	unmap_vmap_area(va);
-	free_vmap_area_noflush(va);
-}
-
-/*
  * Free and unmap a vmap area
  */
 static void free_unmap_vmap_area(struct vmap_area *va)
 {
 	flush_cache_vunmap(va->va_start, va->va_end);
-	free_unmap_vmap_area_noflush(va);
+	unmap_vmap_area(va);
+	free_vmap_area_noflush(va);
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
@@ -801,16 +782,6 @@
 	return va;
 }
 
-static void free_unmap_vmap_area_addr(unsigned long addr)
-{
-	struct vmap_area *va;
-
-	va = find_vmap_area(addr);
-	BUG_ON(!va);
-	free_unmap_vmap_area(va);
-}
-
-
 /*** Per cpu kva allocator ***/
 
 /*
@@ -1131,6 +1102,8 @@
 	if (unlikely(!vmap_initialized))
 		return;
 
+	might_sleep();
+
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 		struct vmap_block *vb;
@@ -1155,7 +1128,11 @@
 		rcu_read_unlock();
 	}
 
-	__purge_vmap_area_lazy(&start, &end, 1, flush);
+	mutex_lock(&vmap_purge_lock);
+	purge_fragmented_blocks_allcpus();
+	if (!__purge_vmap_area_lazy(start, end) && flush)
+		flush_tlb_kernel_range(start, end);
+	mutex_unlock(&vmap_purge_lock);
 }
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
@@ -1168,7 +1145,9 @@
 {
 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
 	unsigned long addr = (unsigned long)mem;
+	struct vmap_area *va;
 
+	might_sleep();
 	BUG_ON(!addr);
 	BUG_ON(addr < VMALLOC_START);
 	BUG_ON(addr > VMALLOC_END);
@@ -1177,10 +1156,14 @@
 	debug_check_no_locks_freed(mem, size);
 	vmap_debug_free_range(addr, addr+size);
 
-	if (likely(count <= VMAP_MAX_ALLOC))
+	if (likely(count <= VMAP_MAX_ALLOC)) {
 		vb_free(mem, size);
-	else
-		free_unmap_vmap_area_addr(addr);
+		return;
+	}
+
+	va = find_vmap_area(addr);
+	BUG_ON(!va);
+	free_unmap_vmap_area(va);
 }
 EXPORT_SYMBOL(vm_unmap_ram);
 
@@ -1554,6 +1537,8 @@
 {
 	struct vmap_area *va;
 
+	might_sleep();
+
 	va = find_vmap_area((unsigned long)addr);
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->vm;
@@ -1609,7 +1594,39 @@
 	kfree(area);
 	return;
 }
- 
+
+static inline void __vfree_deferred(const void *addr)
+{
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list.  schedule_work() should be fine with this too.
+	 */
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+	if (llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
+}
+
+/**
+ *	vfree_atomic  -  release memory allocated by vmalloc()
+ *	@addr:		memory base address
+ *
+ *	This one is just like vfree() but can be called in any atomic context
+ *	except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+	BUG_ON(in_nmi());
+
+	kmemleak_free(addr);
+
+	if (!addr)
+		return;
+	__vfree_deferred(addr);
+}
+
 /**
  *	vfree  -  release memory allocated by vmalloc()
  *	@addr:		memory base address
@@ -1632,11 +1649,9 @@
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-		if (llist_add((struct llist_node *)addr, &p->list))
-			schedule_work(&p->wq);
-	} else
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 42098b4..513c37a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1122,6 +1122,7 @@
 
 /* Walk all the zones in a node and print using a callback */
 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
+		bool nolock,
 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
 {
 	struct zone *zone;
@@ -1132,9 +1133,11 @@
 		if (!populated_zone(zone))
 			continue;
 
-		spin_lock_irqsave(&zone->lock, flags);
+		if (!nolock)
+			spin_lock_irqsave(&zone->lock, flags);
 		print(m, pgdat, zone);
-		spin_unlock_irqrestore(&zone->lock, flags);
+		if (!nolock)
+			spin_unlock_irqrestore(&zone->lock, flags);
 	}
 }
 #endif
@@ -1157,7 +1160,7 @@
 static int frag_show(struct seq_file *m, void *arg)
 {
 	pg_data_t *pgdat = (pg_data_t *)arg;
-	walk_zones_in_node(m, pgdat, frag_show_print);
+	walk_zones_in_node(m, pgdat, false, frag_show_print);
 	return 0;
 }
 
@@ -1198,7 +1201,7 @@
 		seq_printf(m, "%6d ", order);
 	seq_putc(m, '\n');
 
-	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
+	walk_zones_in_node(m, pgdat, false, pagetypeinfo_showfree_print);
 
 	return 0;
 }
@@ -1250,7 +1253,8 @@
 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
 		seq_printf(m, "%12s ", migratetype_names[mtype]);
 	seq_putc(m, '\n');
-	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
+	walk_zones_in_node(m, pgdat, false,
+		pagetypeinfo_showblockcount_print);
 
 	return 0;
 }
@@ -1276,7 +1280,8 @@
 		seq_printf(m, "%12s ", migratetype_names[mtype]);
 	seq_putc(m, '\n');
 
-	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
+	walk_zones_in_node(m, pgdat, true,
+		pagetypeinfo_showmixedcount_print);
 #endif /* CONFIG_PAGE_OWNER */
 }
 
@@ -1433,7 +1438,7 @@
 static int zoneinfo_show(struct seq_file *m, void *arg)
 {
 	pg_data_t *pgdat = (pg_data_t *)arg;
-	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
+	walk_zones_in_node(m, pgdat, false, zoneinfo_show_print);
 	return 0;
 }
 
@@ -1862,7 +1867,7 @@
 	if (!node_state(pgdat->node_id, N_MEMORY))
 		return 0;
 
-	walk_zones_in_node(m, pgdat, unusable_show_print);
+	walk_zones_in_node(m, pgdat, false, unusable_show_print);
 
 	return 0;
 }
@@ -1914,7 +1919,7 @@
 {
 	pg_data_t *pgdat = (pg_data_t *)arg;
 
-	walk_zones_in_node(m, pgdat, extfrag_show_print);
+	walk_zones_in_node(m, pgdat, false, extfrag_show_print);
 
 	return 0;
 }
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 43faf2a..658c900 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
 #include <linux/debugfs.h>
 #include <linux/scatterlist.h>
 #include <linux/crypto.h>
+#include <crypto/algapi.h>
 #include <crypto/b128ops.h>
 #include <crypto/hash.h>
 
@@ -506,7 +507,7 @@
 	if (err)
 		return false;
 
-	return !memcmp(bdaddr->b, hash, 3);
+	return !crypto_memneq(bdaddr->b, hash, 3);
 }
 
 int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
@@ -559,7 +560,7 @@
 			/* This is unlikely, but we need to check that
 			 * we didn't accidentially generate a debug key.
 			 */
-			if (memcmp(smp->local_sk, debug_sk, 32))
+			if (crypto_memneq(smp->local_sk, debug_sk, 32))
 				break;
 		}
 		smp->debug_key = false;
@@ -973,7 +974,7 @@
 	if (ret)
 		return SMP_UNSPECIFIED;
 
-	if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+	if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) {
 		BT_ERR("Pairing failed (confirmation values mismatch)");
 		return SMP_CONFIRM_FAILED;
 	}
@@ -1473,7 +1474,7 @@
 			   smp->rrnd, r, cfm))
 			return SMP_UNSPECIFIED;
 
-		if (memcmp(smp->pcnf, cfm, 16))
+		if (crypto_memneq(smp->pcnf, cfm, 16))
 			return SMP_CONFIRM_FAILED;
 
 		smp->passkey_round++;
@@ -1857,7 +1858,7 @@
 			/* This is unlikely, but we need to check that
 			 * we didn't accidentially generate a debug key.
 			 */
-			if (memcmp(smp->local_sk, debug_sk, 32))
+			if (crypto_memneq(smp->local_sk, debug_sk, 32))
 				break;
 		}
 	}
@@ -2122,7 +2123,7 @@
 		if (err)
 			return SMP_UNSPECIFIED;
 
-		if (memcmp(smp->pcnf, cfm, 16))
+		if (crypto_memneq(smp->pcnf, cfm, 16))
 			return SMP_CONFIRM_FAILED;
 	} else {
 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
@@ -2603,7 +2604,7 @@
 		if (err)
 			return SMP_UNSPECIFIED;
 
-		if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+		if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16))
 			return SMP_CONFIRM_FAILED;
 	}
 
@@ -2636,7 +2637,7 @@
 	else
 		hcon->pending_sec_level = BT_SECURITY_FIPS;
 
-	if (!memcmp(debug_pk, smp->remote_pk, 64))
+	if (!crypto_memneq(debug_pk, smp->remote_pk, 64))
 		set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
 
 	if (smp->method == DSP_PASSKEY) {
@@ -2735,7 +2736,7 @@
 	if (err)
 		return SMP_UNSPECIFIED;
 
-	if (memcmp(check->e, e, 16))
+	if (crypto_memneq(check->e, e, 16))
 		return SMP_DHKEY_CHECK_FAILED;
 
 	if (!hcon->out) {
@@ -3446,7 +3447,7 @@
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 3))
+	if (crypto_memneq(res, exp, 3))
 		return -EINVAL;
 
 	return 0;
@@ -3476,7 +3477,7 @@
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3501,7 +3502,7 @@
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3533,7 +3534,7 @@
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3567,10 +3568,10 @@
 	if (err)
 		return err;
 
-	if (memcmp(mackey, exp_mackey, 16))
+	if (crypto_memneq(mackey, exp_mackey, 16))
 		return -EINVAL;
 
-	if (memcmp(ltk, exp_ltk, 16))
+	if (crypto_memneq(ltk, exp_ltk, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3603,7 +3604,7 @@
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
@@ -3657,7 +3658,7 @@
 	if (err)
 		return err;
 
-	if (memcmp(res, exp, 16))
+	if (crypto_memneq(res, exp, 16))
 		return -EINVAL;
 
 	return 0;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 7dbc80d..6406010e 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,7 +323,8 @@
 			__mdb_entry_to_br_ip(entry, &complete_info->ip);
 			mdb.obj.complete_priv = complete_info;
 			mdb.obj.complete = br_mdb_complete;
-			switchdev_port_obj_add(port_dev, &mdb.obj);
+			if (switchdev_port_obj_add(port_dev, &mdb.obj))
+				kfree(complete_info);
 		}
 	} else if (port_dev && type == RTM_DELMDB) {
 		switchdev_port_obj_del(port_dev, &mdb.obj);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8c1860a..7e168d0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4648,6 +4648,12 @@
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+	skb_dst_drop(skb);
+	kmem_cache_free(skbuff_head_cache, skb);
+}
+
 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 {
 	switch (ret) {
@@ -4661,12 +4667,10 @@
 		break;
 
 	case GRO_MERGED_FREE:
-		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
-			skb_dst_drop(skb);
-			kmem_cache_free(skbuff_head_cache, skb);
-		} else {
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
 			__kfree_skb(skb);
-		}
 		break;
 
 	case GRO_HELD:
@@ -4736,10 +4740,16 @@
 		break;
 
 	case GRO_DROP:
-	case GRO_MERGED_FREE:
 		napi_reuse_skb(napi, skb);
 		break;
 
+	case GRO_MERGED_FREE:
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+			napi_skb_free_stolen_head(skb);
+		else
+			napi_reuse_skb(napi, skb);
+		break;
+
 	case GRO_MERGED:
 		break;
 	}
@@ -7551,7 +7561,7 @@
 {
 #if BITS_PER_LONG == 64
 	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
-	memcpy(stats64, netdev_stats, sizeof(*stats64));
+	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
 	/* zero out counters that only exist in rtnl_link_stats64 */
 	memset((char *)stats64 + sizeof(*netdev_stats), 0,
 	       sizeof(*stats64) - sizeof(*netdev_stats));
@@ -7593,9 +7603,9 @@
 	} else {
 		netdev_stats_to_stats64(storage, &dev->stats);
 	}
-	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
-	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
-	storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
+	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
 	return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f45f619..227c249 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -687,7 +687,7 @@
 	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
 
 	if (!neigh->dead) {
-		pr_warn("Destroying alive neighbour %p\n", neigh);
+		pr_warn("Destroying alive neighbour %pK\n", neigh);
 		dump_stack();
 		return;
 	}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c27382f..7c90130 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2309,6 +2309,8 @@
 	tcp_init_send_head(sk);
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
+	dst_release(sk->sk_rx_dst);
+	sk->sk_rx_dst = NULL;
 	tcp_saved_syn_free(tp);
 
 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f24b9f4..fe5305a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1879,15 +1879,7 @@
 	if (dad_failed)
 		ifp->flags |= IFA_F_DADFAILED;
 
-	if (ifp->flags&IFA_F_PERMANENT) {
-		spin_lock_bh(&ifp->lock);
-		addrconf_del_dad_work(ifp);
-		ifp->flags |= IFA_F_TENTATIVE;
-		spin_unlock_bh(&ifp->lock);
-		if (dad_failed)
-			ipv6_ifa_notify(0, ifp);
-		in6_ifa_put(ifp);
-	} else if (ifp->flags&IFA_F_TEMPORARY) {
+	if (ifp->flags&IFA_F_TEMPORARY) {
 		struct inet6_ifaddr *ifpub;
 		spin_lock_bh(&ifp->lock);
 		ifpub = ifp->ifpub;
@@ -1900,6 +1892,14 @@
 			spin_unlock_bh(&ifp->lock);
 		}
 		ipv6_del_addr(ifp);
+	} else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
+		spin_lock_bh(&ifp->lock);
+		addrconf_del_dad_work(ifp);
+		ifp->flags |= IFA_F_TENTATIVE;
+		spin_unlock_bh(&ifp->lock);
+		if (dad_failed)
+			ipv6_ifa_notify(0, ifp);
+		in6_ifa_put(ifp);
 	} else {
 		ipv6_del_addr(ifp);
 	}
@@ -3356,6 +3356,7 @@
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct netdev_notifier_changeupper_info *info;
 	struct inet6_dev *idev = __in6_dev_get(dev);
+	struct net *net = dev_net(dev);
 	int run_pending = 0;
 	int err;
 
@@ -3371,7 +3372,7 @@
 	case NETDEV_CHANGEMTU:
 		/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
 		if (dev->mtu < IPV6_MIN_MTU) {
-			addrconf_ifdown(dev, 1);
+			addrconf_ifdown(dev, dev != net->loopback_dev);
 			break;
 		}
 
@@ -3487,7 +3488,7 @@
 			 * IPV6_MIN_MTU stop IPv6 on this interface.
 			 */
 			if (dev->mtu < IPV6_MIN_MTU)
-				addrconf_ifdown(dev, 1);
+				addrconf_ifdown(dev, dev != net->loopback_dev);
 		}
 		break;
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 636d4d8..4345ee3 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -771,10 +771,7 @@
 				goto next_iter;
 			}
 
-			if (iter->dst.dev == rt->dst.dev &&
-			    iter->rt6i_idev == rt->rt6i_idev &&
-			    ipv6_addr_equal(&iter->rt6i_gateway,
-					    &rt->rt6i_gateway)) {
+			if (rt6_duplicate_nexthop(iter, rt)) {
 				if (rt->rt6i_nsiblings)
 					rt->rt6i_nsiblings = 0;
 				if (!(iter->rt6i_flags & RTF_EXPIRES))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0b21d61..d8123f6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2922,17 +2922,11 @@
 				 struct rt6_info *rt, struct fib6_config *r_cfg)
 {
 	struct rt6_nh *nh;
-	struct rt6_info *rtnh;
 	int err = -EEXIST;
 
 	list_for_each_entry(nh, rt6_nh_list, next) {
 		/* check if rt6_info already exists */
-		rtnh = nh->rt6_info;
-
-		if (rtnh->dst.dev == rt->dst.dev &&
-		    rtnh->rt6i_idev == rt->rt6i_idev &&
-		    ipv6_addr_equal(&rtnh->rt6i_gateway,
-				    &rt->rt6i_gateway))
+		if (rt6_duplicate_nexthop(nh->rt6_info, rt))
 			return err;
 	}
 
diff --git a/net/key/af_key.c b/net/key/af_key.c
index e67c28e..d8d95b6 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -65,6 +65,10 @@
 	} dump;
 };
 
+static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+			       xfrm_address_t *saddr, xfrm_address_t *daddr,
+			       u16 *family);
+
 static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
 {
 	return (struct pfkey_sock *)sk;
@@ -1922,19 +1926,14 @@
 
 	/* addresses present only in tunnel mode */
 	if (t->mode == XFRM_MODE_TUNNEL) {
-		u8 *sa = (u8 *) (rq + 1);
-		int family, socklen;
+		int err;
 
-		family = pfkey_sockaddr_extract((struct sockaddr *)sa,
-						&t->saddr);
-		if (!family)
-			return -EINVAL;
-
-		socklen = pfkey_sockaddr_len(family);
-		if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
-					   &t->id.daddr) != family)
-			return -EINVAL;
-		t->encap_family = family;
+		err = parse_sockaddr_pair(
+			(struct sockaddr *)(rq + 1),
+			rq->sadb_x_ipsecrequest_len - sizeof(*rq),
+			&t->saddr, &t->id.daddr, &t->encap_family);
+		if (err)
+			return err;
 	} else
 		t->encap_family = xp->family;
 
@@ -1954,7 +1953,11 @@
 	if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
 		return -EINVAL;
 
-	while (len >= sizeof(struct sadb_x_ipsecrequest)) {
+	while (len >= sizeof(*rq)) {
+		if (len < rq->sadb_x_ipsecrequest_len ||
+		    rq->sadb_x_ipsecrequest_len < sizeof(*rq))
+			return -EINVAL;
+
 		if ((err = parse_ipsecrequest(xp, rq)) < 0)
 			return err;
 		len -= rq->sadb_x_ipsecrequest_len;
@@ -2417,7 +2420,6 @@
 	return err;
 }
 
-#ifdef CONFIG_NET_KEY_MIGRATE
 static int pfkey_sockaddr_pair_size(sa_family_t family)
 {
 	return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
@@ -2429,7 +2431,7 @@
 {
 	int af, socklen;
 
-	if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+	if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
 		return -EINVAL;
 
 	af = pfkey_sockaddr_extract(sa, saddr);
@@ -2445,6 +2447,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_NET_KEY_MIGRATE
 static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
 				    struct xfrm_migrate *m)
 {
@@ -2452,13 +2455,14 @@
 	struct sadb_x_ipsecrequest *rq2;
 	int mode;
 
-	if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-	    len < rq1->sadb_x_ipsecrequest_len)
+	if (len < sizeof(*rq1) ||
+	    len < rq1->sadb_x_ipsecrequest_len ||
+	    rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
 		return -EINVAL;
 
 	/* old endpoints */
 	err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
-				  rq1->sadb_x_ipsecrequest_len,
+				  rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
 				  &m->old_saddr, &m->old_daddr,
 				  &m->old_family);
 	if (err)
@@ -2467,13 +2471,14 @@
 	rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
 	len -= rq1->sadb_x_ipsecrequest_len;
 
-	if (len <= sizeof(struct sadb_x_ipsecrequest) ||
-	    len < rq2->sadb_x_ipsecrequest_len)
+	if (len <= sizeof(*rq2) ||
+	    len < rq2->sadb_x_ipsecrequest_len ||
+	    rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
 		return -EINVAL;
 
 	/* new endpoints */
 	err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
-				  rq2->sadb_x_ipsecrequest_len,
+				  rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
 				  &m->new_saddr, &m->new_daddr,
 				  &m->new_family);
 	if (err)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1118c61..b2c706c 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4486,20 +4486,20 @@
 		return -EOPNOTSUPP;
 	}
 
-	auth_data = kzalloc(sizeof(*auth_data) + req->sae_data_len +
+	auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
 			    req->ie_len, GFP_KERNEL);
 	if (!auth_data)
 		return -ENOMEM;
 
 	auth_data->bss = req->bss;
 
-	if (req->sae_data_len >= 4) {
-		__le16 *pos = (__le16 *) req->sae_data;
+	if (req->auth_data_len >= 4) {
+		__le16 *pos = (__le16 *)req->auth_data;
 		auth_data->sae_trans = le16_to_cpu(pos[0]);
 		auth_data->sae_status = le16_to_cpu(pos[1]);
-		memcpy(auth_data->data, req->sae_data + 4,
-		       req->sae_data_len - 4);
-		auth_data->data_len += req->sae_data_len - 4;
+		memcpy(auth_data->data, req->auth_data + 4,
+		       req->auth_data_len - 4);
+		auth_data->data_len += req->auth_data_len - 4;
 	}
 
 	if (req->ie && req->ie_len) {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2c1b498..e34d3f6 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -849,10 +849,8 @@
 {
 	unsigned int verdict = NF_DROP;
 
-	if (IP_VS_FWD_METHOD(cp) != 0) {
-		pr_err("shouldn't reach here, because the box is on the "
-		       "half connection in the tun/dr module.\n");
-	}
+	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+		goto ignore_cp;
 
 	/* Ensure the checksum is correct */
 	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -886,6 +884,8 @@
 		ip_vs_notrack(skb);
 	else
 		ip_vs_update_conntrack(skb, cp, 0);
+
+ignore_cp:
 	verdict = NF_ACCEPT;
 
 out:
@@ -1385,8 +1385,11 @@
 	 */
 	cp = pp->conn_out_get(ipvs, af, skb, &iph);
 
-	if (likely(cp))
+	if (likely(cp)) {
+		if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+			goto ignore_cp;
 		return handle_response(af, skb, pd, cp, &iph, hooknum);
+	}
 
 	/* Check for real-server-started requests */
 	if (atomic_read(&ipvs->conn_out_counter)) {
@@ -1444,9 +1447,15 @@
 			}
 		}
 	}
+
+out:
 	IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
 		      "ip_vs_out: packet continues traversal as normal");
 	return NF_ACCEPT;
+
+ignore_cp:
+	__ip_vs_conn_put(cp);
+	goto out;
 }
 
 /*
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 122bb81..5cf33df 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -982,6 +982,8 @@
 			kfree(se);
 	}
 
+	ida_simple_remove(&nfc_index_ida, dev->idx);
+
 	kfree(dev);
 }
 
@@ -1056,6 +1058,7 @@
 				    int tx_headroom, int tx_tailroom)
 {
 	struct nfc_dev *dev;
+	int rc;
 
 	if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
 	    !ops->deactivate_target || !ops->im_transceive)
@@ -1068,6 +1071,15 @@
 	if (!dev)
 		return NULL;
 
+	rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+	if (rc < 0)
+		goto err_free_dev;
+	dev->idx = rc;
+
+	dev->dev.class = &nfc_class;
+	dev_set_name(&dev->dev, "nfc%d", dev->idx);
+	device_initialize(&dev->dev);
+
 	dev->ops = ops;
 	dev->supported_protocols = supported_protocols;
 	dev->tx_headroom = tx_headroom;
@@ -1090,6 +1102,11 @@
 	}
 
 	return dev;
+
+err_free_dev:
+	kfree(dev);
+
+	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(nfc_allocate_device);
 
@@ -1104,14 +1121,6 @@
 
 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
-	dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
-	if (dev->idx < 0)
-		return dev->idx;
-
-	dev->dev.class = &nfc_class;
-	dev_set_name(&dev->dev, "nfc%d", dev->idx);
-	device_initialize(&dev->dev);
-
 	mutex_lock(&nfc_devlist_mutex);
 	nfc_devlist_generation++;
 	rc = device_add(&dev->dev);
@@ -1149,12 +1158,10 @@
  */
 void nfc_unregister_device(struct nfc_dev *dev)
 {
-	int rc, id;
+	int rc;
 
 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
-	id = dev->idx;
-
 	if (dev->rfkill) {
 		rfkill_unregister(dev->rfkill);
 		rfkill_destroy(dev->rfkill);
@@ -1179,8 +1186,6 @@
 	nfc_devlist_generation++;
 	device_del(&dev->dev);
 	mutex_unlock(&nfc_devlist_mutex);
-
-	ida_simple_remove(&nfc_index_ida, id);
 }
 EXPORT_SYMBOL(nfc_unregister_device);
 
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index b9edf5f..e31dea1 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -76,7 +76,8 @@
 	struct sockaddr_nfc_llcp llcp_addr;
 	int len, ret = 0;
 
-	if (!addr || addr->sa_family != AF_NFC)
+	if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+	    addr->sa_family != AF_NFC)
 		return -EINVAL;
 
 	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -150,7 +151,8 @@
 	struct sockaddr_nfc_llcp llcp_addr;
 	int len, ret = 0;
 
-	if (!addr || addr->sa_family != AF_NFC)
+	if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+	    addr->sa_family != AF_NFC)
 		return -EINVAL;
 
 	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -661,8 +663,7 @@
 
 	pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
 
-	if (!addr || len < sizeof(struct sockaddr_nfc) ||
-	    addr->sa_family != AF_NFC)
+	if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC)
 		return -EINVAL;
 
 	if (addr->service_name_len == 0 && addr->dsap == 0)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 61fff42..85a3d9e 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1173,8 +1173,7 @@
 	return ndev;
 
 free_nfc:
-	kfree(ndev->nfc_dev);
-
+	nfc_free_device(ndev->nfc_dev);
 free_nci:
 	kfree(ndev);
 	return NULL;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index ea023b3..102c681 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -910,7 +910,9 @@
 	u32 device_idx, target_idx, protocol;
 	int rc;
 
-	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    !info->attrs[NFC_ATTR_TARGET_INDEX] ||
+	    !info->attrs[NFC_ATTR_PROTOCOLS])
 		return -EINVAL;
 
 	device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index e0b23fb..525b624 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -129,7 +129,7 @@
 	if (!sock) /* module unload or netns delete in progress */
 		return -ENETUNREACH;
 
-	ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+	ret = sock_create_lite(sock->sk->sk_family,
 			       sock->sk->sk_type, sock->sk->sk_protocol,
 			       &new_sock);
 	if (ret)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c2225cc..daf6624 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1008,6 +1008,9 @@
 
 		return sch;
 	}
+	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
+	if (ops->destroy)
+		ops->destroy(sch);
 err_out3:
 	dev_put(dev);
 	kfree((char *) sch - sch->padded);
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index e3d0458..2fae8b5 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -627,7 +627,9 @@
 			q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
 						      sizeof(u32));
 			if (!q->hhf_arrays[i]) {
-				hhf_destroy(sch);
+				/* Note: hhf_destroy() will be called
+				 * by our caller.
+				 */
 				return -ENOMEM;
 			}
 		}
@@ -638,7 +640,9 @@
 			q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
 							  BITS_PER_BYTE);
 			if (!q->hhf_valid_bits[i]) {
-				hhf_destroy(sch);
+				/* Note: hhf_destroy() will be called
+				 * by our caller.
+				 */
 				return -ENOMEM;
 			}
 		}
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 2bc8d7f..20b7f16 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -52,7 +52,7 @@
 	/* pre-allocate qdiscs, attachment can't fail */
 	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
 			       GFP_KERNEL);
-	if (priv->qdiscs == NULL)
+	if (!priv->qdiscs)
 		return -ENOMEM;
 
 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@
 		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
 						    TC_H_MIN(ntx + 1)));
-		if (qdisc == NULL)
-			goto err;
+		if (!qdisc)
+			return -ENOMEM;
 		priv->qdiscs[ntx] = qdisc;
 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 	}
 
 	sch->flags |= TCQ_F_MQROOT;
 	return 0;
-
-err:
-	mq_destroy(sch);
-	return -ENOMEM;
 }
 
 static void mq_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b5c502c..9226834 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -118,10 +118,8 @@
 	/* pre-allocate qdisc, attachment can't fail */
 	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
 			       GFP_KERNEL);
-	if (priv->qdiscs == NULL) {
-		err = -ENOMEM;
-		goto err;
-	}
+	if (!priv->qdiscs)
+		return -ENOMEM;
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		dev_queue = netdev_get_tx_queue(dev, i);
@@ -129,10 +127,9 @@
 					  get_default_qdisc_ops(dev, i),
 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
 						    TC_H_MIN(i + 1)));
-		if (qdisc == NULL) {
-			err = -ENOMEM;
-			goto err;
-		}
+		if (!qdisc)
+			return -ENOMEM;
+
 		priv->qdiscs[i] = qdisc;
 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 	}
@@ -148,7 +145,7 @@
 		priv->hw_owned = 1;
 		err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
 		if (err)
-			goto err;
+			return err;
 	} else {
 		netdev_set_num_tc(dev, qopt->num_tc);
 		for (i = 0; i < qopt->num_tc; i++)
@@ -162,10 +159,6 @@
 
 	sch->flags |= TCQ_F_MQROOT;
 	return 0;
-
-err:
-	mqprio_destroy(sch);
-	return err;
 }
 
 static void mqprio_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7f195ed..bc5e995 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -742,9 +742,10 @@
 	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
 	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
 	if (!q->ht || !q->slots) {
-		sfq_destroy(sch);
+		/* Note: sfq_destroy() will be called by our caller */
 		return -ENOMEM;
 	}
+
 	for (i = 0; i < q->divisor; i++)
 		q->ht[i] = SFQ_EMPTY_SLOT;
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 90115ce..79aec90 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -34,6 +34,7 @@
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
+#include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/skcipher.h>
 #include <linux/err.h>
@@ -927,7 +928,7 @@
 	if (ret)
 		goto out_err;
 
-	if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
+	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
 		ret = GSS_S_BAD_SIG;
 		goto out_err;
 	}
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 80890c0..cf7063a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -220,15 +220,7 @@
 	enum cfg80211_event_type type;
 
 	union {
-		struct {
-			u8 bssid[ETH_ALEN];
-			const u8 *req_ie;
-			const u8 *resp_ie;
-			size_t req_ie_len;
-			size_t resp_ie_len;
-			struct cfg80211_bss *bss;
-			int status; /* -1 = failed; 0..65535 = status code */
-		} cr;
+		struct cfg80211_connect_resp_params cr;
 		struct {
 			const u8 *req_ie;
 			const u8 *resp_ie;
@@ -346,7 +338,7 @@
 		       const u8 *ssid, int ssid_len,
 		       const u8 *ie, int ie_len,
 		       const u8 *key, int key_len, int key_idx,
-		       const u8 *sae_data, int sae_data_len);
+		       const u8 *auth_data, int auth_data_len);
 int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
 			struct net_device *dev,
 			struct ieee80211_channel *chan,
@@ -384,11 +376,9 @@
 		     struct cfg80211_connect_params *connect,
 		     struct cfg80211_cached_keys *connkeys,
 		     const u8 *prev_bssid);
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			       const u8 *req_ie, size_t req_ie_len,
-			       const u8 *resp_ie, size_t resp_ie_len,
-			       int status, bool wextev,
-			       struct cfg80211_bss *bss);
+void __cfg80211_connect_result(struct net_device *dev,
+			       struct cfg80211_connect_resp_params *params,
+			       bool wextev);
 void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 			     size_t ie_len, u16 reason, bool from_ap);
 int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 76775a2..5499e9f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -26,9 +26,16 @@
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
-	u8 *ie = mgmt->u.assoc_resp.variable;
-	int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
-	u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+	struct cfg80211_connect_resp_params cr;
+
+	memset(&cr, 0, sizeof(cr));
+	cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code);
+	cr.bssid = mgmt->bssid;
+	cr.bss = bss;
+	cr.resp_ie = mgmt->u.assoc_resp.variable;
+	cr.resp_ie_len =
+		len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
+	cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
 
 	trace_cfg80211_send_rx_assoc(dev, bss);
 
@@ -38,7 +45,7 @@
 	 * and got a reject -- we only try again with an assoc
 	 * frame instead of reassoc.
 	 */
-	if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) {
+	if (cfg80211_sme_rx_assoc_resp(wdev, cr.status)) {
 		cfg80211_unhold_bss(bss_from_pub(bss));
 		cfg80211_put_bss(wiphy, bss);
 		return;
@@ -46,9 +53,7 @@
 
 	nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues);
 	/* update current_bss etc., consumes the bss reference */
-	__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
-				  status_code,
-				  status_code == WLAN_STATUS_SUCCESS, bss);
+	__cfg80211_connect_result(dev, &cr, cr.status == WLAN_STATUS_SUCCESS);
 }
 EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
 
@@ -216,14 +221,14 @@
 		       const u8 *ssid, int ssid_len,
 		       const u8 *ie, int ie_len,
 		       const u8 *key, int key_len, int key_idx,
-		       const u8 *sae_data, int sae_data_len)
+		       const u8 *auth_data, int auth_data_len)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_auth_request req = {
 		.ie = ie,
 		.ie_len = ie_len,
-		.sae_data = sae_data,
-		.sae_data_len = sae_data_len,
+		.auth_data = auth_data,
+		.auth_data_len = auth_data_len,
 		.auth_type = auth_type,
 		.key = key,
 		.key_len = key_len,
@@ -657,8 +662,25 @@
 			return err;
 	}
 
-	if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
-		return -EINVAL;
+	if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) {
+		/* Allow random TA to be used with Public Action frames if the
+		 * driver has indicated support for this. Otherwise, only allow
+		 * the local address to be used.
+		 */
+		if (!ieee80211_is_action(mgmt->frame_control) ||
+		    mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
+			return -EINVAL;
+		if (!wdev->current_bss &&
+		    !wiphy_ext_feature_isset(
+			    &rdev->wiphy,
+			    NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA))
+			return -EINVAL;
+		if (wdev->current_bss &&
+		    !wiphy_ext_feature_isset(
+			    &rdev->wiphy,
+			    NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED))
+			return -EINVAL;
+	}
 
 	/* Transmit the Action frame as requested by user space */
 	return rdev_mgmt_tx(rdev, wdev, params, cookie);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4ba0d590..01324d2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -305,8 +305,7 @@
 	[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
 	[NL80211_ATTR_PID] = { .type = NLA_U32 },
 	[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
-	[NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
-				 .len = WLAN_PMKID_LEN },
+	[NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
 	[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
 	[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
 	[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -357,11 +356,12 @@
 	[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
 	[NL80211_ATTR_WDEV] = { .type = NLA_U64 },
 	[NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
-	[NL80211_ATTR_SAE_DATA] = { .type = NLA_BINARY, },
+	[NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
 	[NL80211_ATTR_VHT_CAPABILITY] = { .len = NL80211_VHT_CAPABILITY_LEN },
 	[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
 	[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
 	[NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
+	[NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
 	[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
 	[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
 	[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -414,7 +414,25 @@
 	[NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 },
 	[NL80211_ATTR_NAN_DUAL] = { .type = NLA_U8 },
 	[NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
+	[NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
+				    .len = FILS_MAX_KEK_LEN },
+	[NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
+	[NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, },
 	[NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
+	[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
+	[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
+		.len = sizeof(struct nl80211_bss_select_rssi_adjust)
+	},
+	[NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
+	[NL80211_ATTR_FILS_ERP_USERNAME] = { .type = NLA_BINARY,
+					     .len = FILS_ERP_MAX_USERNAME_LEN },
+	[NL80211_ATTR_FILS_ERP_REALM] = { .type = NLA_BINARY,
+					  .len = FILS_ERP_MAX_REALM_LEN },
+	[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] = { .type = NLA_U16 },
+	[NL80211_ATTR_FILS_ERP_RRK] = { .type = NLA_BINARY,
+					.len = FILS_ERP_MAX_RRK_LEN },
+	[NL80211_ATTR_FILS_CACHE_ID] = { .len = 2 },
+	[NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
 };
 
 /* policy for the key attributes */
@@ -512,7 +530,7 @@
 static const struct nla_policy
 nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
 	[NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
-	[NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY,
+	[NL80211_NAN_FUNC_SERVICE_ID] = {
 				    .len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
 	[NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
 	[NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG },
@@ -1609,6 +1627,8 @@
 			if (rdev->wiphy.features &
 					NL80211_FEATURE_SUPPORTS_WMM_ADMISSION)
 				CMD(add_tx_ts, ADD_TX_TS);
+			CMD(set_multicast_to_unicast, SET_MULTICAST_TO_UNICAST);
+			CMD(update_connect_params, UPDATE_CONNECT_PARAMS);
 		}
 		/* add into the if now */
 #undef CMD
@@ -3747,12 +3767,36 @@
 		if (!(rdev->wiphy.features & NL80211_FEATURE_SAE) &&
 		    auth_type == NL80211_AUTHTYPE_SAE)
 			return false;
+		if (!wiphy_ext_feature_isset(&rdev->wiphy,
+					     NL80211_EXT_FEATURE_FILS_STA) &&
+		    (auth_type == NL80211_AUTHTYPE_FILS_SK ||
+		     auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+		     auth_type == NL80211_AUTHTYPE_FILS_PK))
+			return false;
 		return true;
 	case NL80211_CMD_CONNECT:
+		/* SAE not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_SAE)
+			return false;
+		/* FILS with SK PFS or PK not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+		    auth_type == NL80211_AUTHTYPE_FILS_PK)
+			return false;
+		if (!wiphy_ext_feature_isset(
+			    &rdev->wiphy,
+			    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+		    auth_type == NL80211_AUTHTYPE_FILS_SK)
+			return false;
+		return true;
 	case NL80211_CMD_START_AP:
 		/* SAE not supported yet */
 		if (auth_type == NL80211_AUTHTYPE_SAE)
 			return false;
+		/* FILS not supported yet */
+		if (auth_type == NL80211_AUTHTYPE_FILS_SK ||
+		    auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+		    auth_type == NL80211_AUTHTYPE_FILS_PK)
+			return false;
 		return true;
 	default:
 		return false;
@@ -6331,6 +6375,10 @@
 	struct nlattr *attr1, *attr2;
 	int n_channels = 0, tmp1, tmp2;
 
+	nla_for_each_nested(attr1, freqs, tmp1)
+		if (nla_len(attr1) != sizeof(u32))
+			return 0;
+
 	nla_for_each_nested(attr1, freqs, tmp1) {
 		n_channels++;
 		/*
@@ -6923,6 +6971,12 @@
 	if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
 		return ERR_PTR(-EINVAL);
 
+	if (!wiphy_ext_feature_isset(
+		    wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) &&
+	    (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] ||
+	     attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]))
+		return ERR_PTR(-EINVAL);
+
 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
 			+ sizeof(*request->match_sets) * n_match_sets
@@ -7129,6 +7183,26 @@
 		request->delay =
 			nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
 
+	if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) {
+		request->relative_rssi = nla_get_s8(
+			attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]);
+		request->relative_rssi_set = true;
+	}
+
+	if (request->relative_rssi_set &&
+	    attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) {
+		struct nl80211_bss_select_rssi_adjust *rssi_adjust;
+
+		rssi_adjust = nla_data(
+			attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]);
+		request->rssi_adjust.band = rssi_adjust->band;
+		request->rssi_adjust.delta = rssi_adjust->delta;
+		if (!is_band_valid(wiphy, request->rssi_adjust.band)) {
+			err = -EINVAL;
+			goto out_free;
+		}
+	}
+
 	err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
 	if (err)
 		goto out_free;
@@ -7724,8 +7798,8 @@
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct ieee80211_channel *chan;
-	const u8 *bssid, *ssid, *ie = NULL, *sae_data = NULL;
-	int err, ssid_len, ie_len = 0, sae_data_len = 0;
+	const u8 *bssid, *ssid, *ie = NULL, *auth_data = NULL;
+	int err, ssid_len, ie_len = 0, auth_data_len = 0;
 	enum nl80211_auth_type auth_type;
 	struct key_parse key;
 	bool local_state_change;
@@ -7805,17 +7879,23 @@
 	if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE))
 		return -EINVAL;
 
-	if (auth_type == NL80211_AUTHTYPE_SAE &&
-	    !info->attrs[NL80211_ATTR_SAE_DATA])
+	if ((auth_type == NL80211_AUTHTYPE_SAE ||
+	     auth_type == NL80211_AUTHTYPE_FILS_SK ||
+	     auth_type == NL80211_AUTHTYPE_FILS_SK_PFS ||
+	     auth_type == NL80211_AUTHTYPE_FILS_PK) &&
+	    !info->attrs[NL80211_ATTR_AUTH_DATA])
 		return -EINVAL;
 
-	if (info->attrs[NL80211_ATTR_SAE_DATA]) {
-		if (auth_type != NL80211_AUTHTYPE_SAE)
+	if (info->attrs[NL80211_ATTR_AUTH_DATA]) {
+		if (auth_type != NL80211_AUTHTYPE_SAE &&
+		    auth_type != NL80211_AUTHTYPE_FILS_SK &&
+		    auth_type != NL80211_AUTHTYPE_FILS_SK_PFS &&
+		    auth_type != NL80211_AUTHTYPE_FILS_PK)
 			return -EINVAL;
-		sae_data = nla_data(info->attrs[NL80211_ATTR_SAE_DATA]);
-		sae_data_len = nla_len(info->attrs[NL80211_ATTR_SAE_DATA]);
+		auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]);
+		auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]);
 		/* need to include at least Auth Transaction and Status Code */
-		if (sae_data_len < 4)
+		if (auth_data_len < 4)
 			return -EINVAL;
 	}
 
@@ -7832,7 +7912,7 @@
 	err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
 				 ssid, ssid_len, ie, ie_len,
 				 key.p.key, key.p.key_len, key.idx,
-				 sae_data, sae_data_len);
+				 auth_data, auth_data_len);
 	wdev_unlock(dev->ieee80211_ptr);
 	return err;
 }
@@ -8011,6 +8091,15 @@
 		req.flags |= ASSOC_REQ_USE_RRM;
 	}
 
+	if (info->attrs[NL80211_ATTR_FILS_KEK]) {
+		req.fils_kek = nla_data(info->attrs[NL80211_ATTR_FILS_KEK]);
+		req.fils_kek_len = nla_len(info->attrs[NL80211_ATTR_FILS_KEK]);
+		if (!info->attrs[NL80211_ATTR_FILS_NONCES])
+			return -EINVAL;
+		req.fils_nonces =
+			nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]);
+	}
+
 	err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
 	if (!err) {
 		wdev_lock(dev->ieee80211_ptr);
@@ -8734,6 +8823,35 @@
 		}
 	}
 
+	if (wiphy_ext_feature_isset(&rdev->wiphy,
+				    NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
+	    info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+		connect.fils_erp_username =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+		connect.fils_erp_username_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
+		connect.fils_erp_realm =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+		connect.fils_erp_realm_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
+		connect.fils_erp_next_seq_num =
+			nla_get_u16(
+			   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
+		connect.fils_erp_rrk =
+			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+		connect.fils_erp_rrk_len =
+			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
+	} else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
+		   info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
+		kzfree(connkeys);
+		return -EINVAL;
+	}
+
 	wdev_lock(dev->ieee80211_ptr);
 	err = cfg80211_connect(rdev, dev, &connect, connkeys,
 			       connect.prev_bssid);
@@ -8743,6 +8861,37 @@
 	return err;
 }
 
+static int nl80211_update_connect_params(struct sk_buff *skb,
+					 struct genl_info *info)
+{
+	struct cfg80211_connect_params connect = {};
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	u32 changed = 0;
+	int ret;
+
+	if (!rdev->ops->update_connect_params)
+		return -EOPNOTSUPP;
+
+	if (info->attrs[NL80211_ATTR_IE]) {
+		if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
+			return -EINVAL;
+		connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+		connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+		changed |= UPDATE_ASSOC_IES;
+	}
+
+	wdev_lock(dev->ieee80211_ptr);
+	if (!wdev->current_bss)
+		ret = -ENOLINK;
+	else
+		ret = rdev_update_connect_params(rdev, dev, &connect, changed);
+	wdev_unlock(dev->ieee80211_ptr);
+
+	return ret;
+}
+
 static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8809,14 +8958,28 @@
 
 	memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
 
-	if (!info->attrs[NL80211_ATTR_MAC])
-		return -EINVAL;
-
 	if (!info->attrs[NL80211_ATTR_PMKID])
 		return -EINVAL;
 
 	pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
-	pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+	if (info->attrs[NL80211_ATTR_MAC]) {
+		pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+	} else if (info->attrs[NL80211_ATTR_SSID] &&
+		   info->attrs[NL80211_ATTR_FILS_CACHE_ID] &&
+		   (info->genlhdr->cmd == NL80211_CMD_DEL_PMKSA ||
+		    info->attrs[NL80211_ATTR_PMK])) {
+		pmksa.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+		pmksa.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+		pmksa.cache_id =
+			nla_data(info->attrs[NL80211_ATTR_FILS_CACHE_ID]);
+	} else {
+		return -EINVAL;
+	}
+	if (info->attrs[NL80211_ATTR_PMK]) {
+		pmksa.pmk = nla_data(info->attrs[NL80211_ATTR_PMK]);
+		pmksa.pmk_len = nla_len(info->attrs[NL80211_ATTR_PMK]);
+	}
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
@@ -9604,6 +9767,20 @@
 	if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
 		return -ENOBUFS;
 
+	if (req->relative_rssi_set) {
+		struct nl80211_bss_select_rssi_adjust rssi_adjust;
+
+		if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+			       req->relative_rssi))
+			return -ENOBUFS;
+
+		rssi_adjust.band = req->rssi_adjust.band;
+		rssi_adjust.delta = req->rssi_adjust.delta;
+		if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+			    sizeof(rssi_adjust), &rssi_adjust))
+			return -ENOBUFS;
+	}
+
 	freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
 	if (!freqs)
 		return -ENOBUFS;
@@ -11739,6 +11916,31 @@
 	return 0;
 }
 
+static int nl80211_set_multicast_to_unicast(struct sk_buff *skb,
+					    struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	const struct nlattr *nla;
+	bool enabled;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	if (!rdev->ops->set_multicast_to_unicast)
+		return -EOPNOTSUPP;
+
+	if (wdev->iftype != NL80211_IFTYPE_AP &&
+	    wdev->iftype != NL80211_IFTYPE_P2P_GO)
+		return -EOPNOTSUPP;
+
+	nla = info->attrs[NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED];
+	enabled = nla_get_flag(nla);
+
+	return rdev_set_multicast_to_unicast(rdev, dev, enabled);
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -12192,6 +12394,14 @@
 				  NL80211_FLAG_NEED_RTNL,
 	},
 	{
+		.cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
+		.doit = nl80211_update_connect_params,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
 		.cmd = NL80211_CMD_DISCONNECT,
 		.doit = nl80211_disconnect,
 		.policy = nl80211_policy,
@@ -12612,6 +12822,14 @@
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_SET_MULTICAST_TO_UNICAST,
+		.doit = nl80211_set_multicast_to_unicast,
+		.policy = nl80211_policy,
+		.flags = GENL_UNS_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 /* notification functions */
@@ -12941,7 +13159,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + len, gfp);
 	if (!msg)
 		return;
 
@@ -13085,15 +13303,16 @@
 }
 
 void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
-				 struct net_device *netdev, const u8 *bssid,
-				 const u8 *req_ie, size_t req_ie_len,
-				 const u8 *resp_ie, size_t resp_ie_len,
-				 int status, gfp_t gfp)
+				 struct net_device *netdev,
+				 struct cfg80211_connect_resp_params *cr,
+				 gfp_t gfp)
 {
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len +
+			cr->fils_kek_len + cr->pmk_len +
+			(cr->pmkid ? WLAN_PMKID_LEN : 0), gfp);
 	if (!msg)
 		return;
 
@@ -13105,15 +13324,31 @@
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-	    (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
+	    (cr->bssid &&
+	     nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, cr->bssid)) ||
 	    nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
-			status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
-			status) ||
-	    (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) ||
-	    (req_ie &&
-	     nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
-	    (resp_ie &&
-	     nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+			cr->status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
+			cr->status) ||
+	    (cr->status < 0 &&
+	     (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+	      nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON,
+			  cr->timeout_reason))) ||
+	    (cr->req_ie &&
+	     nla_put(msg, NL80211_ATTR_REQ_IE, cr->req_ie_len, cr->req_ie)) ||
+	    (cr->resp_ie &&
+	     nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len,
+		     cr->resp_ie)) ||
+	    (cr->update_erp_next_seq_num &&
+	     nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+			 cr->fils_erp_next_seq_num)) ||
+	    (cr->status == WLAN_STATUS_SUCCESS &&
+	     ((cr->fils_kek &&
+	       nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils_kek_len,
+		       cr->fils_kek)) ||
+	      (cr->pmk &&
+	       nla_put(msg, NL80211_ATTR_PMK, cr->pmk_len, cr->pmk)) ||
+	      (cr->pmkid &&
+	       nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->pmkid)))))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
@@ -13135,7 +13370,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
 	if (!msg)
 		return;
 
@@ -13172,7 +13407,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
 	if (!msg)
 		return;
 
@@ -13248,7 +13483,7 @@
 
 	trace_cfg80211_notify_new_peer_candidate(dev, addr);
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + ie_len, gfp);
 	if (!msg)
 		return;
 
@@ -13619,7 +13854,7 @@
 	struct sk_buff *msg;
 	void *hdr;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + len, gfp);
 	if (!msg)
 		return -ENOMEM;
 
@@ -13663,7 +13898,7 @@
 
 	trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	msg = nlmsg_new(100 + len, gfp);
 	if (!msg)
 		return;
 
@@ -14472,7 +14707,7 @@
 	if (!ft_event->target_ap)
 		return;
 
-	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
 	if (!msg)
 		return;
 
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 7e3821d..2a84d18 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -55,10 +55,9 @@
 				struct net_device *netdev,
 				const u8 *addr, gfp_t gfp);
 void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
-				 struct net_device *netdev, const u8 *bssid,
-				 const u8 *req_ie, size_t req_ie_len,
-				 const u8 *resp_ie, size_t resp_ie_len,
-				 int status, gfp_t gfp);
+				 struct net_device *netdev,
+				 struct cfg80211_connect_resp_params *params,
+				 gfp_t gfp);
 void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
 			 struct net_device *netdev, const u8 *bssid,
 			 const u8 *req_ie, size_t req_ie_len,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 11cf83c..2f42507 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -490,6 +490,18 @@
 	return ret;
 }
 
+static inline int
+rdev_update_connect_params(struct cfg80211_registered_device *rdev,
+			   struct net_device *dev,
+			   struct cfg80211_connect_params *sme, u32 changed)
+{
+	int ret;
+	trace_rdev_update_connect_params(&rdev->wiphy, dev, sme, changed);
+	ret = rdev->ops->update_connect_params(&rdev->wiphy, dev, sme, changed);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
 static inline int rdev_disconnect(struct cfg80211_registered_device *rdev,
 				  struct net_device *dev, u16 reason_code)
 {
@@ -562,6 +574,18 @@
 	return ret;
 }
 
+static inline int
+rdev_set_multicast_to_unicast(struct cfg80211_registered_device *rdev,
+			      struct net_device *dev,
+			      const bool enabled)
+{
+	int ret;
+	trace_rdev_set_multicast_to_unicast(&rdev->wiphy, dev, enabled);
+	ret = rdev->ops->set_multicast_to_unicast(&rdev->wiphy, dev, enabled);
+	trace_rdev_return_int(&rdev->wiphy, ret);
+	return ret;
+}
+
 static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev)
 {
 	trace_rdev_rfkill_poll(&rdev->wiphy);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 8ae2e20..d7e6abc 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,10 +34,11 @@
 		CFG80211_CONN_SCAN_AGAIN,
 		CFG80211_CONN_AUTHENTICATE_NEXT,
 		CFG80211_CONN_AUTHENTICATING,
-		CFG80211_CONN_AUTH_FAILED,
+		CFG80211_CONN_AUTH_FAILED_TIMEOUT,
 		CFG80211_CONN_ASSOCIATE_NEXT,
 		CFG80211_CONN_ASSOCIATING,
 		CFG80211_CONN_ASSOC_FAILED,
+		CFG80211_CONN_ASSOC_FAILED_TIMEOUT,
 		CFG80211_CONN_DEAUTH,
 		CFG80211_CONN_ABANDON,
 		CFG80211_CONN_CONNECTED,
@@ -163,7 +164,8 @@
 	return err;
 }
 
-static int cfg80211_conn_do_work(struct wireless_dev *wdev)
+static int cfg80211_conn_do_work(struct wireless_dev *wdev,
+				 enum nl80211_timeout_reason *treason)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_connect_params *params;
@@ -194,7 +196,8 @@
 					  NULL, 0,
 					  params->key, params->key_len,
 					  params->key_idx, NULL, 0);
-	case CFG80211_CONN_AUTH_FAILED:
+	case CFG80211_CONN_AUTH_FAILED_TIMEOUT:
+		*treason = NL80211_TIMEOUT_AUTH;
 		return -ENOTCONN;
 	case CFG80211_CONN_ASSOCIATE_NEXT:
 		if (WARN_ON(!rdev->ops->assoc))
@@ -221,6 +224,9 @@
 					     WLAN_REASON_DEAUTH_LEAVING,
 					     false);
 		return err;
+	case CFG80211_CONN_ASSOC_FAILED_TIMEOUT:
+		*treason = NL80211_TIMEOUT_ASSOC;
+		/* fall through */
 	case CFG80211_CONN_ASSOC_FAILED:
 		cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
 				     NULL, 0,
@@ -246,6 +252,7 @@
 		container_of(work, struct cfg80211_registered_device, conn_work);
 	struct wireless_dev *wdev;
 	u8 bssid_buf[ETH_ALEN], *bssid = NULL;
+	enum nl80211_timeout_reason treason;
 
 	rtnl_lock();
 
@@ -267,10 +274,15 @@
 			memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
 			bssid = bssid_buf;
 		}
-		if (cfg80211_conn_do_work(wdev)) {
-			__cfg80211_connect_result(
-					wdev->netdev, bssid,
-					NULL, 0, NULL, 0, -1, false, NULL);
+		treason = NL80211_TIMEOUT_UNSPECIFIED;
+		if (cfg80211_conn_do_work(wdev, &treason)) {
+			struct cfg80211_connect_resp_params cr;
+
+			memset(&cr, 0, sizeof(cr));
+			cr.status = -1;
+			cr.bssid = bssid;
+			cr.timeout_reason = treason;
+			__cfg80211_connect_result(wdev->netdev, &cr, false);
 		}
 		wdev_unlock(wdev);
 	}
@@ -373,9 +385,13 @@
 		wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
 		schedule_work(&rdev->conn_work);
 	} else if (status_code != WLAN_STATUS_SUCCESS) {
-		__cfg80211_connect_result(wdev->netdev, mgmt->bssid,
-					  NULL, 0, NULL, 0,
-					  status_code, false, NULL);
+		struct cfg80211_connect_resp_params cr;
+
+		memset(&cr, 0, sizeof(cr));
+		cr.status = status_code;
+		cr.bssid = mgmt->bssid;
+		cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
+		__cfg80211_connect_result(wdev->netdev, &cr, false);
 	} else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
 		wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
 		schedule_work(&rdev->conn_work);
@@ -423,7 +439,7 @@
 	if (!wdev->conn)
 		return;
 
-	wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
+	wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT;
 	schedule_work(&rdev->conn_work);
 }
 
@@ -445,7 +461,7 @@
 	if (!wdev->conn)
 		return;
 
-	wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
+	wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT;
 	schedule_work(&rdev->conn_work);
 }
 
@@ -587,7 +603,9 @@
 
 	/* we're good if we have a matching bss struct */
 	if (bss) {
-		err = cfg80211_conn_do_work(wdev);
+		enum nl80211_timeout_reason treason;
+
+		err = cfg80211_conn_do_work(wdev, &treason);
 		cfg80211_put_bss(wdev->wiphy, bss);
 	} else {
 		/* otherwise we'll need to scan for the AP first */
@@ -681,11 +699,9 @@
  */
 
 /* This method must consume bss one way or another */
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-			       const u8 *req_ie, size_t req_ie_len,
-			       const u8 *resp_ie, size_t resp_ie_len,
-			       int status, bool wextev,
-			       struct cfg80211_bss *bss)
+void __cfg80211_connect_result(struct net_device *dev,
+			       struct cfg80211_connect_resp_params *cr,
+			       bool wextev)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	const u8 *country_ie;
@@ -697,48 +713,48 @@
 
 	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
 		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) {
-		cfg80211_put_bss(wdev->wiphy, bss);
+		cfg80211_put_bss(wdev->wiphy, cr->bss);
 		return;
 	}
 
-	nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
-				    bssid, req_ie, req_ie_len,
-				    resp_ie, resp_ie_len,
-				    status, GFP_KERNEL);
+	nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev, cr,
+				    GFP_KERNEL);
 
 #ifdef CONFIG_CFG80211_WEXT
 	if (wextev) {
-		if (req_ie && status == WLAN_STATUS_SUCCESS) {
+		if (cr->req_ie && cr->status == WLAN_STATUS_SUCCESS) {
 			memset(&wrqu, 0, sizeof(wrqu));
-			wrqu.data.length = req_ie_len;
-			wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie);
+			wrqu.data.length = cr->req_ie_len;
+			wireless_send_event(dev, IWEVASSOCREQIE, &wrqu,
+					    cr->req_ie);
 		}
 
-		if (resp_ie && status == WLAN_STATUS_SUCCESS) {
+		if (cr->resp_ie && cr->status == WLAN_STATUS_SUCCESS) {
 			memset(&wrqu, 0, sizeof(wrqu));
-			wrqu.data.length = resp_ie_len;
-			wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie);
+			wrqu.data.length = cr->resp_ie_len;
+			wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu,
+					    cr->resp_ie);
 		}
 
 		memset(&wrqu, 0, sizeof(wrqu));
 		wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-		if (bssid && status == WLAN_STATUS_SUCCESS) {
-			memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
-			memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+		if (cr->bssid && cr->status == WLAN_STATUS_SUCCESS) {
+			memcpy(wrqu.ap_addr.sa_data, cr->bssid, ETH_ALEN);
+			memcpy(wdev->wext.prev_bssid, cr->bssid, ETH_ALEN);
 			wdev->wext.prev_bssid_valid = true;
 		}
 		wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
 	}
 #endif
 
-	if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+	if (!cr->bss && (cr->status == WLAN_STATUS_SUCCESS)) {
 		WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
-		bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
-				       wdev->ssid, wdev->ssid_len,
-				       wdev->conn_bss_type,
-				       IEEE80211_PRIVACY_ANY);
-		if (bss)
-			cfg80211_hold_bss(bss_from_pub(bss));
+		cr->bss = cfg80211_get_bss(wdev->wiphy, NULL, cr->bssid,
+					   wdev->ssid, wdev->ssid_len,
+					   wdev->conn_bss_type,
+					   IEEE80211_PRIVACY_ANY);
+		if (cr->bss)
+			cfg80211_hold_bss(bss_from_pub(cr->bss));
 	}
 
 	if (wdev->current_bss) {
@@ -747,28 +763,28 @@
 		wdev->current_bss = NULL;
 	}
 
-	if (status != WLAN_STATUS_SUCCESS) {
+	if (cr->status != WLAN_STATUS_SUCCESS) {
 		kzfree(wdev->connect_keys);
 		wdev->connect_keys = NULL;
 		wdev->ssid_len = 0;
-		if (bss) {
-			cfg80211_unhold_bss(bss_from_pub(bss));
-			cfg80211_put_bss(wdev->wiphy, bss);
+		if (cr->bss) {
+			cfg80211_unhold_bss(bss_from_pub(cr->bss));
+			cfg80211_put_bss(wdev->wiphy, cr->bss);
 		}
 		cfg80211_sme_free(wdev);
 		return;
 	}
 
-	if (WARN_ON(!bss))
+	if (WARN_ON(!cr->bss))
 		return;
 
-	wdev->current_bss = bss_from_pub(bss);
+	wdev->current_bss = bss_from_pub(cr->bss);
 
 	if (!(wdev->wiphy->flags & WIPHY_FLAG_HAS_STATIC_WEP))
 		cfg80211_upload_connect_keys(wdev);
 
 	rcu_read_lock();
-	country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+	country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY);
 	if (!country_ie) {
 		rcu_read_unlock();
 		return;
@@ -785,62 +801,95 @@
 	 * - country_ie + 2, the start of the country ie data, and
 	 * - and country_ie[1] which is the IE length
 	 */
-	regulatory_hint_country_ie(wdev->wiphy, bss->channel->band,
+	regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band,
 				   country_ie + 2, country_ie[1]);
 	kfree(country_ie);
 }
 
 /* Consumes bss object one way or another */
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
-			  struct cfg80211_bss *bss, const u8 *req_ie,
-			  size_t req_ie_len, const u8 *resp_ie,
-			  size_t resp_ie_len, int status, gfp_t gfp)
+void cfg80211_connect_done(struct net_device *dev,
+			   struct cfg80211_connect_resp_params *params,
+			   gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
+	u8 *next;
 
-	if (bss) {
+	if (params->bss) {
 		/* Make sure the bss entry provided by the driver is valid. */
-		struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+		struct cfg80211_internal_bss *ibss = bss_from_pub(params->bss);
 
 		if (WARN_ON(list_empty(&ibss->list))) {
-			cfg80211_put_bss(wdev->wiphy, bss);
+			cfg80211_put_bss(wdev->wiphy, params->bss);
 			return;
 		}
 	}
 
-	ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
+	ev = kzalloc(sizeof(*ev) + (params->bssid ? ETH_ALEN : 0) +
+		     params->req_ie_len + params->resp_ie_len +
+		     params->fils_kek_len + params->pmk_len +
+		     (params->pmkid ? WLAN_PMKID_LEN : 0), gfp);
 	if (!ev) {
-		cfg80211_put_bss(wdev->wiphy, bss);
+		cfg80211_put_bss(wdev->wiphy, params->bss);
 		return;
 	}
 
 	ev->type = EVENT_CONNECT_RESULT;
-	if (bssid)
-		memcpy(ev->cr.bssid, bssid, ETH_ALEN);
-	if (req_ie_len) {
-		ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
-		ev->cr.req_ie_len = req_ie_len;
-		memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
+	next = ((u8 *)ev) + sizeof(*ev);
+	if (params->bssid) {
+		ev->cr.bssid = next;
+		memcpy((void *)ev->cr.bssid, params->bssid, ETH_ALEN);
+		next += ETH_ALEN;
 	}
-	if (resp_ie_len) {
-		ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
-		ev->cr.resp_ie_len = resp_ie_len;
-		memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+	if (params->req_ie_len) {
+		ev->cr.req_ie = next;
+		ev->cr.req_ie_len = params->req_ie_len;
+		memcpy((void *)ev->cr.req_ie, params->req_ie,
+		       params->req_ie_len);
+		next += params->req_ie_len;
 	}
-	if (bss)
-		cfg80211_hold_bss(bss_from_pub(bss));
-	ev->cr.bss = bss;
-	ev->cr.status = status;
+	if (params->resp_ie_len) {
+		ev->cr.resp_ie = next;
+		ev->cr.resp_ie_len = params->resp_ie_len;
+		memcpy((void *)ev->cr.resp_ie, params->resp_ie,
+		       params->resp_ie_len);
+		next += params->resp_ie_len;
+	}
+	if (params->fils_kek_len) {
+		ev->cr.fils_kek = next;
+		ev->cr.fils_kek_len = params->fils_kek_len;
+		memcpy((void *)ev->cr.fils_kek, params->fils_kek,
+		       params->fils_kek_len);
+		next += params->fils_kek_len;
+	}
+	if (params->pmk_len) {
+		ev->cr.pmk = next;
+		ev->cr.pmk_len = params->pmk_len;
+		memcpy((void *)ev->cr.pmk, params->pmk, params->pmk_len);
+		next += params->pmk_len;
+	}
+	if (params->pmkid) {
+		ev->cr.pmkid = next;
+		memcpy((void *)ev->cr.pmkid, params->pmkid, WLAN_PMKID_LEN);
+		next += WLAN_PMKID_LEN;
+	}
+	ev->cr.update_erp_next_seq_num = params->update_erp_next_seq_num;
+	if (params->update_erp_next_seq_num)
+		ev->cr.fils_erp_next_seq_num = params->fils_erp_next_seq_num;
+	if (params->bss)
+		cfg80211_hold_bss(bss_from_pub(params->bss));
+	ev->cr.bss = params->bss;
+	ev->cr.status = params->status;
+	ev->cr.timeout_reason = params->timeout_reason;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
 	queue_work(cfg80211_wq, &rdev->event_work);
 }
-EXPORT_SYMBOL(cfg80211_connect_bss);
+EXPORT_SYMBOL(cfg80211_connect_done);
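A minimal usage sketch for the renamed API (hypothetical fullmac-driver code, not part of this patch; only fields that appear in the hunks above are used): the driver fills a stack-allocated cfg80211_connect_resp_params and hands it to cfg80211_connect_done(), which copies every variable-length blob (BSSID, IEs, FILS KEK, PMK, PMKID) into the single event allocation behind the 'next' cursor shown above.

static void example_report_connect(struct net_device *dev, const u8 *bssid,
				   const u8 *req_ie, size_t req_ie_len,
				   const u8 *resp_ie, size_t resp_ie_len,
				   int status)
{
	struct cfg80211_connect_resp_params params;

	memset(&params, 0, sizeof(params));
	params.status = status;		/* WLAN_STATUS_*, or negative for a timeout */
	params.bssid = bssid;
	params.req_ie = req_ie;
	params.req_ie_len = req_ie_len;
	params.resp_ie = resp_ie;
	params.resp_ie_len = resp_ie_len;
	params.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
	/* FILS fields (fils_kek, pmk, pmkid, fils_erp_next_seq_num) stay zeroed when unused */

	cfg80211_connect_done(dev, &params, GFP_KERNEL);
}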
 
 /* Consumes bss object one way or another */
 void __cfg80211_roamed(struct wireless_dev *wdev,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index a3d0a91b..ea1b47e 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1281,6 +1281,24 @@
 		  __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid))
 );
 
+TRACE_EVENT(rdev_update_connect_params,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_connect_params *sme, u32 changed),
+	TP_ARGS(wiphy, netdev, sme, changed),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u32, changed)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->changed = changed;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", parameters changed: %u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG,  __entry->changed)
+);
+
 TRACE_EVENT(rdev_set_cqm_rssi_config,
 	TP_PROTO(struct wiphy *wiphy,
 		 struct net_device *netdev, s32 rssi_thold,
@@ -3030,6 +3048,25 @@
 	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
 	TP_ARGS(wiphy, wdev)
 );
+
+TRACE_EVENT(rdev_set_multicast_to_unicast,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 const bool enabled),
+	TP_ARGS(wiphy, netdev, enabled),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(bool, enabled)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->enabled = enabled;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG,
+		  BOOL_TO_STR(__entry->enabled))
+);
 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 877e9d3..8ac413f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -15,6 +15,7 @@
 #include <linux/mpls.h>
 #include <net/ndisc.h>
 #include <linux/if_arp.h>
+#include <linux/gcd.h>
 #include "core.h"
 #include "rdev-ops.h"
 
@@ -935,7 +936,6 @@
 {
 	struct cfg80211_event *ev;
 	unsigned long flags;
-	const u8 *bssid = NULL;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	while (!list_empty(&wdev->event_list)) {
@@ -947,15 +947,10 @@
 		wdev_lock(wdev);
 		switch (ev->type) {
 		case EVENT_CONNECT_RESULT:
-			if (!is_zero_ether_addr(ev->cr.bssid))
-				bssid = ev->cr.bssid;
 			__cfg80211_connect_result(
-				wdev->netdev, bssid,
-				ev->cr.req_ie, ev->cr.req_ie_len,
-				ev->cr.resp_ie, ev->cr.resp_ie_len,
-				ev->cr.status,
-				ev->cr.status == WLAN_STATUS_SUCCESS,
-				ev->cr.bss);
+				wdev->netdev,
+				&ev->cr,
+				ev->cr.status == WLAN_STATUS_SUCCESS);
 			break;
 		case EVENT_ROAMED:
 			__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
@@ -1559,47 +1554,53 @@
 }
 EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
 
+static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
+				       u32 *beacon_int_gcd,
+				       bool *beacon_int_different)
+{
+	struct wireless_dev *wdev;
+
+	*beacon_int_gcd = 0;
+	*beacon_int_different = false;
+
+	list_for_each_entry(wdev, &wiphy->wdev_list, list) {
+		if (!wdev->beacon_interval)
+			continue;
+
+		if (!*beacon_int_gcd) {
+			*beacon_int_gcd = wdev->beacon_interval;
+			continue;
+		}
+
+		if (wdev->beacon_interval == *beacon_int_gcd)
+			continue;
+
+		*beacon_int_different = true;
+		*beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval);
+	}
+
+	if (new_beacon_int && *beacon_int_gcd != new_beacon_int) {
+		if (*beacon_int_gcd)
+			*beacon_int_different = true;
+		*beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int);
+	}
+}
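Worked example (illustrative values, not from the patch): with two interfaces already beaconing at 100 TU and 240 TU, the loop above yields a GCD of 20 and sets *beacon_int_different, since the intervals are not all equal; a new interface asking for 200 TU then folds in as gcd(20, 200) = 20. Whether 20 is acceptable is decided later in cfg80211_iter_combinations() against the combination's beacon_int_min_gcd.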
+
 int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
 				 enum nl80211_iftype iftype, u32 beacon_int)
 {
-	struct wireless_dev *wdev;
-	struct iface_combination_params params = {
-		.beacon_int_gcd = beacon_int,	/* GCD(n) = n */
-	};
+	/*
+	 * This is just a basic pre-condition check; if interface combinations
+	 * are possible the driver must already be checking those with a call
+	 * to cfg80211_check_combinations(), in which case we'll validate more
+	 * through the cfg80211_calculate_bi_data() call and code in
+	 * cfg80211_iter_combinations().
+	 */
 
 	if (beacon_int < 10 || beacon_int > 10000)
 		return -EINVAL;
 
-	params.iftype_num[iftype] = 1;
-	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
-		if (!wdev->beacon_interval)
-			continue;
-
-		params.iftype_num[wdev->iftype]++;
-	}
-
-	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
-		u32 bi_prev = wdev->beacon_interval;
-
-		if (!wdev->beacon_interval)
-			continue;
-
-		/* slight optimisation - skip identical BIs */
-		if (wdev->beacon_interval == beacon_int)
-			continue;
-
-		params.beacon_int_different = true;
-
-		/* Get the GCD */
-		while (bi_prev != 0) {
-			u32 tmp_bi = bi_prev;
-
-			bi_prev = params.beacon_int_gcd % bi_prev;
-			params.beacon_int_gcd = tmp_bi;
-		}
-	}
-
-	return cfg80211_check_combinations(&rdev->wiphy, &params);
+	return 0;
 }
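With the GCD bookkeeping gone, this helper is only a range check; as the comment notes, the fuller validation happens through cfg80211_check_combinations(). A simplified, hypothetical sketch of such a call, using only the iface_combination_params fields visible elsewhere in this patch (a real caller would also count every already-operating interface into iftype_num):

static int example_validate_new_ap(struct wiphy *wiphy, u32 beacon_int)
{
	struct iface_combination_params params = {
		.new_beacon_int = beacon_int,
	};

	/* the interface being brought up; the beacon-interval GCD and
	 * "different" flag are now derived from the operating wdevs by
	 * cfg80211_calculate_bi_data() above */
	params.iftype_num[NL80211_IFTYPE_AP] = 1;

	return cfg80211_check_combinations(wiphy, &params);
}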
 
 int cfg80211_iter_combinations(struct wiphy *wiphy,
@@ -1613,6 +1614,21 @@
 	int i, j, iftype;
 	int num_interfaces = 0;
 	u32 used_iftypes = 0;
+	u32 beacon_int_gcd;
+	bool beacon_int_different;
+
+	/*
+	 * This is a bit strange, since the iteration used to rely only on
+	 * the data given by the driver, but here it now relies on context,
+	 * in form of the currently operating interfaces.
+	 * This is OK for all current users, and saves us from having to
+	 * push the GCD calculations into all the drivers.
+	 * In the future, this should probably rely more on data that's in
+	 * cfg80211 already - the only thing not would appear to be any new
+	 * interfaces (while being brought up) and channel/radar data.
+	 */
+	cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
+				   &beacon_int_gcd, &beacon_int_different);
 
 	if (params->radar_detect) {
 		rcu_read_lock();
@@ -1675,12 +1691,11 @@
 		if ((all_iftypes & used_iftypes) != used_iftypes)
 			goto cont;
 
-		if (params->beacon_int_gcd) {
+		if (beacon_int_gcd) {
 			if (c->beacon_int_min_gcd &&
-			    params->beacon_int_gcd < c->beacon_int_min_gcd)
-				return -EINVAL;
-			if (!c->beacon_int_min_gcd &&
-			    params->beacon_int_different)
+			    beacon_int_gcd < c->beacon_int_min_gcd)
+				goto cont;
+			if (!c->beacon_int_min_gcd && beacon_int_different)
 				goto cont;
 		}
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a0d45ef..71ea359 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3654,7 +3654,7 @@
 				$fixedline =~ s/\s*=\s*$/ = {/;
 				fix_insert_line($fixlinenr, $fixedline);
 				$fixedline = $line;
-				$fixedline =~ s/^(.\s*){\s*/$1/;
+				$fixedline =~ s/^(.\s*)\{\s*/$1/;
 				fix_insert_line($fixlinenr, $fixedline);
 			}
 		}
@@ -3995,7 +3995,7 @@
 				my $fixedline = rtrim($prevrawline) . " {";
 				fix_insert_line($fixlinenr, $fixedline);
 				$fixedline = $rawline;
-				$fixedline =~ s/^(.\s*){\s*/$1\t/;
+				$fixedline =~ s/^(.\s*)\{\s*/$1\t/;
 				if ($fixedline !~ /^\+\s*$/) {
 					fix_insert_line($fixlinenr, $fixedline);
 				}
@@ -4484,7 +4484,7 @@
 			if (ERROR("SPACING",
 				  "space required before the open brace '{'\n" . $herecurr) &&
 			    $fix) {
-				$fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
+				$fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
 			}
 		}
 
diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c
index 22286a1..14fb793 100644
--- a/scripts/dtc/libfdt/fdt.c
+++ b/scripts/dtc/libfdt/fdt.c
@@ -71,6 +71,20 @@
 		return -FDT_ERR_BADMAGIC;
 	}
 
+	if (fdt_off_dt_struct(fdt) > (UINT_MAX - fdt_size_dt_struct(fdt)))
+		return FDT_ERR_BADOFFSET;
+
+	if (fdt_off_dt_strings(fdt) > (UINT_MAX -  fdt_size_dt_strings(fdt)))
+		return FDT_ERR_BADOFFSET;
+
+	if ((fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt))
+	    > fdt_totalsize(fdt))
+		return FDT_ERR_BADOFFSET;
+
+	if ((fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))
+	    > fdt_totalsize(fdt))
+		return FDT_ERR_BADOFFSET;
+
 	return 0;
 }
 
diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
index 8be02b1..468e169 100644
--- a/scripts/dtc/libfdt/fdt_rw.c
+++ b/scripts/dtc/libfdt/fdt_rw.c
@@ -396,7 +396,7 @@
 static void _fdt_packblocks(const char *old, char *new,
 			    int mem_rsv_size, int struct_size)
 {
-	int mem_rsv_off, struct_off, strings_off;
+	uint32_t mem_rsv_off, struct_off, strings_off;
 
 	mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8);
 	struct_off = mem_rsv_off + mem_rsv_size;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
old mode 100755
new mode 100644
index f742c65..1b9e67b
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -145,6 +145,25 @@
 	${CC} ${aflags} -c -o ${2} ${afile}
 }
 
+# Generates ${2} .o file with RTIC MP's from the ${1} object file (vmlinux)
+# ${3} the file name where the sizes of the RTIC MP structure are stored
+# just in case, save copy of the RTIC mp to ${4}
+# Note: RTIC_MPGEN has to be set if MPGen is available
+rtic_mp()
+{
+	# assume that RTIC_MP_O generation may fail
+	RTIC_MP_O=
+
+	${RTIC_MPGEN} --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
+	--binpath='' --vmlinux=${1} --config=${KCONFIG_CONFIG} && \
+	cat rtic_mp.c | ${CC} -c -o ${2} -x c - && \
+	cp rtic_mp.c ${4} && \
+	${NM} --print-size --size-sort ${2} > ${3} && \
+	RTIC_MP_O=${2}
+	# NM - save generated variable sizes for verification
+	# RTIC_MP_O is our retval - great success if set to generated .o file
+}
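Since every step in the function above is chained with '&&', RTIC_MP_O is assigned only when the MP generation, the compile, the copy and the size dump all succeed; on any failure it is left empty, so the first invocation below contributes nothing to KBUILD_VMLINUX_MAIN and the link proceeds without the RTIC object.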
+
 # Create map file with all symbols from ${1}
 # See mksymap for additional details
 mksysmap()
@@ -169,6 +188,8 @@
 	rm -f System.map
 	rm -f vmlinux
 	rm -f vmlinux.o
+	rm -f .tmp_rtic_mp_sz*
+	rm -f rtic_mp.*
 }
 
 on_exit()
@@ -231,6 +252,15 @@
 # final build of init/
 ${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}"
 
+# Generate RTIC MP placeholder compile unit of the correct size
+# and add it to the list of link objects
+# this needs to be done before generating kallsyms
+if [ ! -z ${RTIC_MPGEN+x} ]; then
+	rtic_mp vmlinux.o rtic_mp.o .tmp_rtic_mp_sz1 .tmp_rtic_mp1.c
+	KBUILD_VMLINUX_MAIN+=" "
+	KBUILD_VMLINUX_MAIN+=$RTIC_MP_O
+fi
+
 kallsymso=""
 kallsyms_vmlinux=""
 if [ -n "${CONFIG_KALLSYMS}" ]; then
@@ -276,6 +306,18 @@
 	fi
 fi
 
+# Update RTIC MP object by replacing the place holder
+# with actual MP data of the same size
+# Also double check that object size did not change
+if [ ! -z ${RTIC_MPGEN+x} ]; then
+	rtic_mp "${kallsyms_vmlinux}" rtic_mp.o .tmp_rtic_mp_sz2 \
+                .tmp_rtic_mp2.c
+	if ! cmp -s .tmp_rtic_mp_sz1 .tmp_rtic_mp_sz2; then
+		echo >&2 'ERROR: RTIC MP object files size mismatch'
+		exit 1
+	fi
+fi
+
 info LD vmlinux
 vmlinux_link "${kallsymso}" vmlinux
 
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index a472bf2..01c67be 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1159,7 +1159,7 @@
 
 config SND_SOC_MSM_HDMI_CODEC_RX
 	bool "HDMI Audio Playback"
-	depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
+	depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_660)
 	help
 	HDMI audio drivers should be built only if the platform
         supports hdmi panel.
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index d8b5ae6..daf05d8 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -162,14 +162,7 @@
 snd-soc-wcd934x-objs := wcd934x.o
 snd-soc-wcd9xxx-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o \
 			wcdcal-hwdep.o wcd-dsp-mgr.o wcd-dsp-utils.o \
-			wcd9xxx-soc-init.o
-ifeq ($(CONFIG_COMMON_CLK_MSM), y)
-	snd-soc-wcd9xxx-objs += audio-ext-clk.o
-endif
-
-ifeq ($(CONFIG_COMMON_CLK_QCOM), y)
-	snd-soc-wcd9xxx-objs += audio-ext-clk-up.o
-endif
+			wcd9xxx-soc-init.o audio-ext-clk-up.o
 snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o
 snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o
 snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
diff --git a/sound/soc/codecs/audio-ext-clk.c b/sound/soc/codecs/audio-ext-clk.c
deleted file mode 100644
index 72f16f5..0000000
--- a/sound/soc/codecs/audio-ext-clk.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <dt-bindings/clock/audio-ext-clk.h>
-#include <sound/q6afe-v2.h>
-#include "audio-ext-clk-up.h"
-
-struct pinctrl_info {
-	struct pinctrl *pinctrl;
-	struct pinctrl_state *sleep;
-	struct pinctrl_state *active;
-};
-
-struct audio_ext_ap_clk {
-	bool enabled;
-	int gpio;
-	struct clk c;
-};
-
-struct audio_ext_pmi_clk {
-	int gpio;
-	struct clk c;
-};
-
-struct audio_ext_ap_clk2 {
-	bool enabled;
-	struct pinctrl_info pnctrl_info;
-	struct clk c;
-};
-
-static struct afe_clk_set clk2_config = {
-	Q6AFE_LPASS_CLK_CONFIG_API_VERSION,
-	Q6AFE_LPASS_CLK_ID_SPEAKER_I2S_OSR,
-	Q6AFE_LPASS_IBIT_CLK_11_P2896_MHZ,
-	Q6AFE_LPASS_CLK_ATTRIBUTE_COUPLE_NO,
-	Q6AFE_LPASS_CLK_ROOT_DEFAULT,
-	0,
-};
-
-static inline struct audio_ext_ap_clk *to_audio_ap_clk(struct clk *clk)
-{
-	return container_of(clk, struct audio_ext_ap_clk, c);
-}
-
-static int audio_ext_clk_prepare(struct clk *clk)
-{
-	struct audio_ext_ap_clk *audio_clk = to_audio_ap_clk(clk);
-
-	pr_debug("%s: gpio: %d\n", __func__, audio_clk->gpio);
-	if (gpio_is_valid(audio_clk->gpio))
-		return gpio_direction_output(audio_clk->gpio, 1);
-	return 0;
-}
-
-static void audio_ext_clk_unprepare(struct clk *clk)
-{
-	struct audio_ext_ap_clk *audio_clk = to_audio_ap_clk(clk);
-
-	pr_debug("%s: gpio: %d\n", __func__, audio_clk->gpio);
-	if (gpio_is_valid(audio_clk->gpio))
-		gpio_direction_output(audio_clk->gpio, 0);
-}
-
-static inline struct audio_ext_ap_clk2 *to_audio_ap_clk2(struct clk *clk)
-{
-	return container_of(clk, struct audio_ext_ap_clk2, c);
-}
-
-static int audio_ext_clk2_prepare(struct clk *clk)
-{
-	struct audio_ext_ap_clk2 *audio_clk2 = to_audio_ap_clk2(clk);
-	struct pinctrl_info *pnctrl_info = &audio_clk2->pnctrl_info;
-	int ret;
-
-
-	if (!pnctrl_info->pinctrl || !pnctrl_info->active)
-		return 0;
-
-	ret = pinctrl_select_state(pnctrl_info->pinctrl,
-				   pnctrl_info->active);
-	if (ret) {
-		pr_err("%s: active state select failed with %d\n",
-			__func__, ret);
-		return -EIO;
-	}
-
-	clk2_config.enable = 1;
-	ret = afe_set_lpass_clk_cfg(IDX_RSVD_3, &clk2_config);
-	if (ret < 0) {
-		pr_err("%s: failed to set clock, ret = %d\n", __func__, ret);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void audio_ext_clk2_unprepare(struct clk *clk)
-{
-	struct audio_ext_ap_clk2 *audio_clk2 = to_audio_ap_clk2(clk);
-	struct pinctrl_info *pnctrl_info = &audio_clk2->pnctrl_info;
-	int ret;
-
-	if (!pnctrl_info->pinctrl || !pnctrl_info->sleep)
-		return;
-
-	ret = pinctrl_select_state(pnctrl_info->pinctrl,
-				   pnctrl_info->sleep);
-	if (ret)
-		pr_err("%s: sleep state select failed with %d\n",
-			__func__, ret);
-
-	clk2_config.enable = 0;
-	ret = afe_set_lpass_clk_cfg(IDX_RSVD_3, &clk2_config);
-	if (ret < 0)
-		pr_err("%s: failed to reset clock, ret = %d\n", __func__, ret);
-}
-
-static const struct clk_ops audio_ext_ap_clk_ops = {
-	.prepare = audio_ext_clk_prepare,
-	.unprepare = audio_ext_clk_unprepare,
-};
-
-static const struct clk_ops audio_ext_ap_clk2_ops = {
-	.prepare = audio_ext_clk2_prepare,
-	.unprepare = audio_ext_clk2_unprepare,
-};
-
-static struct audio_ext_pmi_clk audio_pmi_clk = {
-	.gpio = -EINVAL,
-	.c = {
-		.dbg_name = "audio_ext_pmi_clk",
-		.ops = &clk_ops_dummy,
-		CLK_INIT(audio_pmi_clk.c),
-	},
-};
-
-static struct audio_ext_pmi_clk audio_pmi_lnbb_clk = {
-	.gpio = -EINVAL,
-	.c = {
-		.dbg_name = "audio_ext_pmi_lnbb_clk",
-		.ops = &clk_ops_dummy,
-		CLK_INIT(audio_pmi_lnbb_clk.c),
-	},
-};
-
-static struct audio_ext_ap_clk audio_ap_clk = {
-	.gpio = -EINVAL,
-	.c = {
-		.dbg_name = "audio_ext_ap_clk",
-		.ops = &audio_ext_ap_clk_ops,
-		CLK_INIT(audio_ap_clk.c),
-	},
-};
-
-static struct audio_ext_ap_clk2 audio_ap_clk2 = {
-	.c = {
-		.dbg_name = "audio_ext_ap_clk2",
-		.ops = &audio_ext_ap_clk2_ops,
-		CLK_INIT(audio_ap_clk2.c),
-	},
-};
-
-static struct clk_lookup audio_ref_clock[] = {
-	CLK_LIST(audio_ap_clk),
-	CLK_LIST(audio_pmi_clk),
-	CLK_LIST(audio_pmi_lnbb_clk),
-	CLK_LIST(audio_ap_clk2),
-};
-
-static int audio_get_pinctrl(struct platform_device *pdev)
-{
-	struct pinctrl_info *pnctrl_info;
-	struct pinctrl *pinctrl;
-	int ret;
-
-	pnctrl_info = &audio_ap_clk2.pnctrl_info;
-
-	if (pnctrl_info->pinctrl) {
-		dev_dbg(&pdev->dev, "%s: already requested before\n",
-			__func__);
-		return -EINVAL;
-	}
-
-	pinctrl = devm_pinctrl_get(&pdev->dev);
-	if (IS_ERR_OR_NULL(pinctrl)) {
-		dev_dbg(&pdev->dev, "%s: Unable to get pinctrl handle\n",
-			__func__);
-		return -EINVAL;
-	}
-	pnctrl_info->pinctrl = pinctrl;
-	/* get all state handles from Device Tree */
-	pnctrl_info->sleep = pinctrl_lookup_state(pinctrl, "sleep");
-	if (IS_ERR(pnctrl_info->sleep)) {
-		dev_err(&pdev->dev, "%s: could not get sleep pinstate\n",
-			__func__);
-		goto err;
-	}
-	pnctrl_info->active = pinctrl_lookup_state(pinctrl, "active");
-	if (IS_ERR(pnctrl_info->active)) {
-		dev_err(&pdev->dev, "%s: could not get active pinstate\n",
-			__func__);
-		goto err;
-	}
-	/* Reset the TLMM pins to a default state */
-	ret = pinctrl_select_state(pnctrl_info->pinctrl,
-				   pnctrl_info->sleep);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: Disable TLMM pins failed with %d\n",
-			__func__, ret);
-		goto err;
-	}
-	return 0;
-
-err:
-	devm_pinctrl_put(pnctrl_info->pinctrl);
-	return -EINVAL;
-}
-
-static int audio_ref_clk_probe(struct platform_device *pdev)
-{
-	int clk_gpio;
-	int ret;
-	struct clk *audio_clk;
-
-	clk_gpio = of_get_named_gpio(pdev->dev.of_node,
-				     "qcom,audio-ref-clk-gpio", 0);
-	if (clk_gpio > 0) {
-		ret = gpio_request(clk_gpio, "EXT_CLK");
-		if (ret) {
-			dev_err(&pdev->dev,
-				"Request ext clk gpio failed %d, err:%d\n",
-				clk_gpio, ret);
-			goto err;
-		}
-		if (of_property_read_bool(pdev->dev.of_node,
-					"qcom,node_has_rpm_clock")) {
-			audio_clk = clk_get(&pdev->dev, NULL);
-			if (IS_ERR(audio_clk)) {
-				dev_err(&pdev->dev, "Failed to get RPM div clk\n");
-				ret = PTR_ERR(audio_clk);
-				goto err_gpio;
-			}
-			audio_pmi_clk.c.parent = audio_clk;
-			audio_pmi_clk.gpio = clk_gpio;
-		} else
-			audio_ap_clk.gpio = clk_gpio;
-
-	} else {
-		if (of_property_read_bool(pdev->dev.of_node,
-					"qcom,node_has_rpm_clock")) {
-			audio_clk = clk_get(&pdev->dev, NULL);
-			if (IS_ERR(audio_clk)) {
-				dev_err(&pdev->dev, "Failed to get lnbbclk2\n");
-				ret = PTR_ERR(audio_clk);
-				goto err;
-			}
-			audio_pmi_lnbb_clk.c.parent = audio_clk;
-			audio_pmi_lnbb_clk.gpio = -EINVAL;
-		}
-	}
-
-	ret = audio_get_pinctrl(pdev);
-	if (ret)
-		dev_dbg(&pdev->dev, "%s: Parsing pinctrl failed\n",
-			__func__);
-
-	ret = of_msm_clock_register(pdev->dev.of_node, audio_ref_clock,
-			      ARRAY_SIZE(audio_ref_clock));
-	if (ret) {
-		dev_err(&pdev->dev, "%s: audio ref clock register failed\n",
-			__func__);
-		goto err_gpio;
-	}
-
-	return 0;
-
-err_gpio:
-	gpio_free(clk_gpio);
-
-err:
-	return ret;
-}
-
-static int audio_ref_clk_remove(struct platform_device *pdev)
-{
-	struct pinctrl_info *pnctrl_info = &audio_ap_clk2.pnctrl_info;
-
-	if (audio_pmi_clk.gpio > 0)
-		gpio_free(audio_pmi_clk.gpio);
-	else if (audio_ap_clk.gpio > 0)
-		gpio_free(audio_ap_clk.gpio);
-
-	if (pnctrl_info->pinctrl) {
-		devm_pinctrl_put(pnctrl_info->pinctrl);
-		pnctrl_info->pinctrl = NULL;
-	}
-
-	return 0;
-}
-
-static const struct of_device_id audio_ref_clk_match[] = {
-	{.compatible = "qcom,audio-ref-clk"},
-	{}
-};
-MODULE_DEVICE_TABLE(of, audio_ref_clk_match);
-
-static struct platform_driver audio_ref_clk_driver = {
-	.driver = {
-		.name = "audio-ref-clk",
-		.owner = THIS_MODULE,
-		.of_match_table = audio_ref_clk_match,
-	},
-	.probe = audio_ref_clk_probe,
-	.remove = audio_ref_clk_remove,
-};
-
-int audio_ref_clk_platform_init(void)
-{
-	return platform_driver_register(&audio_ref_clk_driver);
-}
-
-void audio_ref_clk_platform_exit(void)
-{
-	platform_driver_unregister(&audio_ref_clk_driver);
-}
-
-MODULE_DESCRIPTION("Audio Ref Clock module platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index caf8843..312bb45 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -22,7 +22,7 @@
 
 # for SDM660 sound card driver
 snd-soc-sdm660-common-objs := sdm660-common.o
-obj-$(CONFIG_SND_SOC_SDM660_COMMON) += snd-soc-sdm660-common.o
+obj-$(CONFIG_SND_SOC_660) += snd-soc-sdm660-common.o
 
 # for SDM660 sound card driver
 snd-soc-int-codec-objs := sdm660-internal.o
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 1590605..46bc540 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -2388,7 +2388,8 @@
 	struct adm_cmd_device_open_v5	open;
 	struct adm_cmd_device_open_v6	open_v6;
 	int ret = 0;
-	int port_idx, copp_idx, flags;
+	int port_idx, flags;
+	int copp_idx = -1;
 	int tmp_port = q6audio_get_port_id(port_id);
 
 	pr_debug("%s:port %#x path:%d rate:%d mode:%d perf_mode:%d,topo_id %d\n",
@@ -2442,8 +2443,17 @@
 	    (topology == VPM_TX_DM_RFECNS_COPP_TOPOLOGY))
 		rate = 16000;
 
-	copp_idx = adm_get_idx_if_copp_exists(port_idx, topology, perf_mode,
-						rate, bit_width, app_type);
+	/*
+	 * Routing driver reuses the same adm for streams with the same
+	 * app_type, sample_rate etc.
+	 * This isn't allowed for ULL streams as per the DSP interface
+	 */
+	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE)
+		copp_idx = adm_get_idx_if_copp_exists(port_idx, topology,
+						      perf_mode,
+						      rate, bit_width,
+						      app_type);
+
 	if (copp_idx < 0) {
 		copp_idx = adm_get_next_available_copp(port_idx);
 		if (copp_idx >= MAX_COPPS_PER_PORT) {
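Net effect of initializing copp_idx to -1 above: ULTRA_LOW_LATENCY_PCM_MODE streams never take the reuse lookup, so they always fall through to adm_get_next_available_copp() and get a dedicated copp, as the DSP interface requires.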
diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c
index b34b04b..43df772 100644
--- a/sound/soc/msm/sdm660-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -44,6 +44,8 @@
 	EXT_DISP_RX_IDX_MAX,
 };
 
+bool codec_reg_done;
+
 /* TDM default config */
 static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
 	{ /* PRI TDM */
@@ -2016,6 +2018,12 @@
 }
 EXPORT_SYMBOL(msm_common_snd_controls_size);
 
+void msm_set_codec_reg_done(bool done)
+{
+	codec_reg_done = done;
+}
+EXPORT_SYMBOL(msm_set_codec_reg_done);
+
 static inline int param_is_mask(int p)
 {
 	return (p >= SNDRV_PCM_HW_PARAM_FIRST_MASK) &&
@@ -3027,6 +3035,12 @@
 	  .data = "tasha_codec"},
 	{ .compatible = "qcom,sdm660-asoc-snd-tavil",
 	  .data = "tavil_codec"},
+	{ .compatible = "qcom,sdm670-asoc-snd",
+	  .data = "internal_codec"},
+	{ .compatible = "qcom,sdm670-asoc-snd-tasha",
+	  .data = "tasha_codec"},
+	{ .compatible = "qcom,sdm670-asoc-snd-tavil",
+	  .data = "tavil_codec"},
 	{},
 };
 
@@ -3044,6 +3058,7 @@
 	if (!pdata)
 		return -ENOMEM;
 
+	msm_set_codec_reg_done(false);
 	match = of_match_node(sdm660_asoc_machine_of_match,
 			      pdev->dev.of_node);
 	if (!match)
diff --git a/sound/soc/msm/sdm660-common.h b/sound/soc/msm/sdm660-common.h
index bca8cd7..ffe77bc 100644
--- a/sound/soc/msm/sdm660-common.h
+++ b/sound/soc/msm/sdm660-common.h
@@ -122,4 +122,5 @@
 int msm_mi2s_snd_startup(struct snd_pcm_substream *substream);
 void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream);
 int msm_common_snd_controls_size(void);
+void msm_set_codec_reg_done(bool done);
 #endif
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index 6ff29c9..34a6626 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -29,8 +29,15 @@
 #define WCN_CDC_SLIM_RX_CH_MAX 2
 #define WCN_CDC_SLIM_TX_CH_MAX 3
 
-static struct snd_soc_card snd_soc_card_msm_card_tavil;
-static struct snd_soc_card snd_soc_card_msm_card_tasha;
+static struct snd_soc_card snd_soc_card_msm_card_tavil = {
+	.name = "sdm670-tavil-snd-card",
+	.late_probe = msm_snd_card_tavil_late_probe,
+};
+
+static struct snd_soc_card snd_soc_card_msm_card_tasha = {
+	.name = "sdm670-tasha-snd-card",
+	.late_probe = msm_snd_card_tasha_late_probe,
+};
 
 static struct snd_soc_ops msm_ext_slimbus_be_ops = {
 	.hw_params = msm_snd_hw_params,
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index 426c150..4224289 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -55,7 +55,6 @@
 
 static int msm_ext_spk_control = 1;
 static struct wcd_mbhc_config *wcd_mbhc_cfg_ptr;
-bool codec_reg_done;
 
 struct msm_asoc_wcd93xx_codec {
 	void* (*get_afe_config_fn)(struct snd_soc_codec *codec,
@@ -603,23 +602,23 @@
 
 static void *def_ext_mbhc_cal(void)
 {
-	void *tavil_wcd_cal;
+	void *wcd_mbhc_cal;
 	struct wcd_mbhc_btn_detect_cfg *btn_cfg;
 	u16 *btn_high;
 
-	tavil_wcd_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
+	wcd_mbhc_cal = kzalloc(WCD_MBHC_CAL_SIZE(WCD_MBHC_DEF_BUTTONS,
 				WCD9XXX_MBHC_DEF_RLOADS), GFP_KERNEL);
-	if (!tavil_wcd_cal)
+	if (!wcd_mbhc_cal)
 		return NULL;
 
-#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(tavil_wcd_cal)->X) = (Y))
+#define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(wcd_mbhc_cal)->X) = (Y))
 	S(v_hs_max, 1600);
 #undef S
-#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(tavil_wcd_cal)->X) = (Y))
+#define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(wcd_mbhc_cal)->X) = (Y))
 	S(num_btn, WCD_MBHC_DEF_BUTTONS);
 #undef S
 
-	btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(tavil_wcd_cal);
+	btn_cfg = WCD_MBHC_CAL_BTN_DET_PTR(wcd_mbhc_cal);
 	btn_high = ((void *)&btn_cfg->_v_btn_low) +
 		(sizeof(btn_cfg->_v_btn_low[0]) * btn_cfg->num_btn);
 
@@ -632,7 +631,7 @@
 	btn_high[6] = 500;
 	btn_high[7] = 500;
 
-	return tavil_wcd_cal;
+	return wcd_mbhc_cal;
 }
 
 static inline int param_is_mask(int p)
@@ -1478,6 +1477,79 @@
 	{"MIC BIAS4", NULL, "MCLK"},
 };
 
+int msm_snd_card_tasha_late_probe(struct snd_soc_card *card)
+{
+	const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+	void *mbhc_calibration;
+
+	rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+	if (!rtd) {
+		dev_err(card->dev,
+			"%s: snd_soc_get_pcm_runtime for %s failed!\n",
+			__func__, be_dl_name);
+		ret = -EINVAL;
+		goto err_pcm_runtime;
+	}
+
+	mbhc_calibration = def_ext_mbhc_cal();
+	if (!mbhc_calibration) {
+		ret = -ENOMEM;
+		goto err_mbhc_cal;
+	}
+	wcd_mbhc_cfg_ptr->calibration = mbhc_calibration;
+	ret = tasha_mbhc_hs_detect(rtd->codec, wcd_mbhc_cfg_ptr);
+	if (ret) {
+		dev_err(card->dev, "%s: mbhc hs detect failed, err:%d\n",
+			__func__, ret);
+		goto err_hs_detect;
+	}
+	return 0;
+
+err_hs_detect:
+	kfree(mbhc_calibration);
+err_mbhc_cal:
+err_pcm_runtime:
+	return ret;
+}
+
+int msm_snd_card_tavil_late_probe(struct snd_soc_card *card)
+{
+	const char *be_dl_name = LPASS_BE_SLIMBUS_0_RX;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+	void *mbhc_calibration;
+
+	rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+	if (!rtd) {
+		dev_err(card->dev,
+			"%s: snd_soc_get_pcm_runtime for %s failed!\n",
+			__func__, be_dl_name);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mbhc_calibration = def_ext_mbhc_cal();
+	if (!mbhc_calibration) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	wcd_mbhc_cfg_ptr->calibration = mbhc_calibration;
+	ret = tavil_mbhc_hs_detect(rtd->codec, wcd_mbhc_cfg_ptr);
+	if (ret) {
+		dev_err(card->dev, "%s: mbhc hs detect failed, err:%d\n",
+			__func__, ret);
+		goto err_free_mbhc_cal;
+	}
+	return 0;
+
+err_free_mbhc_cal:
+	kfree(mbhc_calibration);
+err:
+	return ret;
+}
+
 /**
  * msm_audrx_init - Audio init function of sound card instantiate.
  *
@@ -1698,7 +1770,6 @@
 		if (!entry) {
 			pr_debug("%s: Cannot create codecs module entry\n",
 				 __func__);
-			pdata->codec_root = NULL;
 			goto done;
 		}
 		pdata->codec_root = entry;
@@ -1721,50 +1792,17 @@
 		if (!entry) {
 			pr_debug("%s: Cannot create codecs module entry\n",
 				 __func__);
-			ret = 0;
-			goto err_snd_module;
+			goto done;
 		}
 		pdata->codec_root = entry;
 		tasha_codec_info_create_codec_entry(pdata->codec_root, codec);
 		tasha_mbhc_zdet_gpio_ctrl(msm_config_hph_en0_gpio, rtd->codec);
 	}
-
-	wcd_mbhc_cfg_ptr->calibration = def_ext_mbhc_cal();
-	if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
-		if (wcd_mbhc_cfg_ptr->calibration) {
-			pdata->codec = codec;
-			ret = tavil_mbhc_hs_detect(codec, wcd_mbhc_cfg_ptr);
-			if (ret < 0)
-				pr_err("%s: Failed to intialise mbhc %d\n",
-						__func__, ret);
-		} else {
-			pr_err("%s: wcd_mbhc_cfg calibration is NULL\n",
-					__func__);
-			ret = -ENOMEM;
-			goto err_mbhc_cal;
-		}
-	} else {
-		if (wcd_mbhc_cfg_ptr->calibration) {
-			pdata->codec = codec;
-			ret = tasha_mbhc_hs_detect(codec, wcd_mbhc_cfg_ptr);
-			if (ret < 0)
-				pr_err("%s: Failed to intialise mbhc %d\n",
-						__func__, ret);
-		} else {
-			pr_err("%s: wcd_mbhc_cfg calibration is NULL\n",
-					__func__);
-			ret = -ENOMEM;
-			goto err_mbhc_cal;
-		}
-
-	}
-	codec_reg_done = true;
 done:
+	msm_set_codec_reg_done(true);
 	return 0;
 
-err_snd_module:
 err_afe_cfg:
-err_mbhc_cal:
 	return ret;
 }
 EXPORT_SYMBOL(msm_audrx_init);
diff --git a/sound/soc/msm/sdm660-external.h b/sound/soc/msm/sdm660-external.h
index acf5735..d53e7c7 100644
--- a/sound/soc/msm/sdm660-external.h
+++ b/sound/soc/msm/sdm660-external.h
@@ -30,6 +30,8 @@
 						int snd_card_val);
 int msm_ext_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
 			       struct snd_pcm_hw_params *params);
+int msm_snd_card_tavil_late_probe(struct snd_soc_card *card);
+int msm_snd_card_tasha_late_probe(struct snd_soc_card *card);
 #ifdef CONFIG_SND_SOC_EXT_CODEC
 int msm_ext_cdc_init(struct platform_device *, struct msm_asoc_mach_data *,
 		     struct snd_soc_card **, struct wcd_mbhc_config *);
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 4b9334b..1402154 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -1314,6 +1314,7 @@
 	msm_dig_codec_info_create_codec_entry(codec_root, dig_cdc);
 	msm_anlg_codec_info_create_codec_entry(codec_root, ana_cdc);
 done:
+	msm_set_codec_reg_done(true);
 	return 0;
 }
 
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 03a4938..838771c 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -134,6 +134,13 @@
 	u32 msm_is_mi2s_master;
 };
 
+static u32 mi2s_ebit_clk[MI2S_MAX] = {
+	Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_TER_MI2S_EBIT,
+	Q6AFE_LPASS_CLK_ID_QUAD_MI2S_EBIT
+};
+
 struct auxpcm_conf {
 	struct mutex lock;
 	u32 ref_cnt;
@@ -434,6 +441,7 @@
 					   "Five", "Six", "Seven",
 					   "Eight"};
 static const char *const hifi_text[] = {"Off", "On"};
+static const char *const qos_text[] = {"Disable", "Enable"};
 
 static SOC_ENUM_SINGLE_EXT_DECL(slim_0_rx_chs, slim_rx_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(slim_2_rx_chs, slim_rx_ch_text);
@@ -498,9 +506,11 @@
 static SOC_ENUM_SINGLE_EXT_DECL(aux_pcm_rx_format, bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(aux_pcm_tx_format, bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
+static SOC_ENUM_SINGLE_EXT_DECL(qos_vote, qos_text);
 
 static struct platform_device *spdev;
 static int msm_hifi_control;
+static int qos_vote_status;
 
 static bool is_initial_boot;
 static bool codec_reg_done;
@@ -2628,6 +2638,72 @@
 	return 0;
 }
 
+static s32 msm_qos_value(struct snd_pcm_runtime *runtime)
+{
+	s32 usecs;
+
+	if (!runtime->rate)
+		return -EINVAL;
+
+	/* take 75% of period time as the deadline */
+	usecs = (750000 / runtime->rate) * runtime->period_size;
+	usecs += ((750000 % runtime->rate) * runtime->period_size) /
+		 runtime->rate;
+
+	return usecs;
+}
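A worked example with assumed values (not from this patch): for a 48000 Hz stream with a 240-frame period (a 5 ms period), the integer math gives (750000 / 48000) * 240 = 3600 us plus ((750000 % 48000) * 240) / 48000 = 150 us, i.e. a 3750 us CPU DMA latency request, which is exactly 75% of the period.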
+
+static int msm_qos_ctl_get(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.enumerated.item[0] = qos_vote_status;
+
+	return 0;
+}
+
+static int msm_qos_ctl_put(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct snd_soc_card *card = codec->component.card;
+	const char *be_name = MSM_DAILINK_NAME(LowLatency);
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_pcm_substream *substream;
+	s32 usecs;
+
+	rtd = snd_soc_get_pcm_runtime(card, be_name);
+	if (!rtd) {
+		pr_err("%s: fail to get pcm runtime for %s\n",
+			__func__, be_name);
+		return -EINVAL;
+	}
+
+	substream = rtd->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	if (!substream) {
+		pr_err("%s: substream is null\n", __func__);
+		return -EINVAL;
+	}
+
+	qos_vote_status = ucontrol->value.enumerated.item[0];
+	if (qos_vote_status) {
+		if (pm_qos_request_active(&substream->latency_pm_qos_req))
+			pm_qos_remove_request(&substream->latency_pm_qos_req);
+		if (!substream->runtime) {
+			pr_err("%s: runtime is null\n", __func__);
+			return -EINVAL;
+		}
+		usecs = msm_qos_value(substream->runtime);
+		if (usecs >= 0)
+			pm_qos_add_request(&substream->latency_pm_qos_req,
+					PM_QOS_CPU_DMA_LATENCY, usecs);
+	} else {
+		if (pm_qos_request_active(&substream->latency_pm_qos_req))
+			pm_qos_remove_request(&substream->latency_pm_qos_req);
+	}
+
+	return 0;
+}
+
 static const struct snd_kcontrol_new msm_snd_controls[] = {
 	SOC_ENUM_EXT("SLIM_0_RX Channels", slim_0_rx_chs,
 			msm_slim_rx_ch_get, msm_slim_rx_ch_put),
@@ -2857,6 +2933,8 @@
 			msm_aux_pcm_tx_format_get, msm_aux_pcm_tx_format_put),
 	SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
 			msm_hifi_put),
+	SOC_ENUM_EXT("MultiMedia5_RX QOS Vote", qos_vote, msm_qos_ctl_get,
+			msm_qos_ctl_put),
 };
 
 static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec,
@@ -4176,9 +4254,6 @@
 		mi2s_clk[dai_id].clk_freq_in_hz =
 		    mi2s_tx_cfg[dai_id].sample_rate * 2 * bit_per_sample;
 	}
-
-	if (!mi2s_intf_conf[dai_id].msm_is_mi2s_master)
-		mi2s_clk[dai_id].clk_freq_in_hz = 0;
 }
 
 static int msm_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable)
@@ -4529,6 +4604,11 @@
 	 */
 	mutex_lock(&mi2s_intf_conf[index].lock);
 	if (++mi2s_intf_conf[index].ref_cnt == 1) {
+		/* Check if msm needs to provide the clock to the interface */
+		if (!mi2s_intf_conf[index].msm_is_mi2s_master) {
+			mi2s_clk[index].clk_id = mi2s_ebit_clk[index];
+			fmt = SND_SOC_DAIFMT_CBM_CFM;
+		}
 		ret = msm_mi2s_set_sclk(substream, true);
 		if (ret < 0) {
 			dev_err(rtd->card->dev,
@@ -4548,9 +4628,6 @@
 			ret = -EINVAL;
 			goto clk_off;
 		}
-		/* Check if msm needs to provide the clock to the interface */
-		if (!mi2s_intf_conf[index].msm_is_mi2s_master)
-			fmt = SND_SOC_DAIFMT_CBM_CFM;
 		ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
 		if (ret < 0) {
 			pr_err("%s: set fmt cpu dai failed for MI2S (%d), err:%d\n",
diff --git a/sound/usb/card.c b/sound/usb/card.c
index eaf18aa..a87a526 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -619,7 +619,7 @@
 	assoc = intf->intf_assoc;
 	if (assoc && assoc->bFunctionClass == USB_CLASS_AUDIO &&
 	    assoc->bFunctionProtocol == UAC_VERSION_3 &&
-	    assoc->bFunctionSubClass == FULL_ADC_PROFILE) {
+	    assoc->bFunctionSubClass == FULL_ADC_3_0) {
 		dev_info(&dev->dev, "No support for full-fledged ADC 3.0 yet!!\n");
 		return -EINVAL;
 	}
diff --git a/techpack/.gitignore b/techpack/.gitignore
new file mode 100644
index 0000000..58da0b8
--- /dev/null
+++ b/techpack/.gitignore
@@ -0,0 +1,2 @@
+# ignore all subdirs except stub
+!/stub/
diff --git a/techpack/Kbuild b/techpack/Kbuild
new file mode 100644
index 0000000..3c7c8e6
--- /dev/null
+++ b/techpack/Kbuild
@@ -0,0 +1,5 @@
+techpack-dirs := $(shell find $(srctree)/$(src) -maxdepth 1 -mindepth 1 -type d -not -name ".*")
+obj-y += stub/ $(addsuffix /,$(subst $(srctree)/$(src)/,,$(techpack-dirs)))
+
+techpack-header-dirs := $(shell find $(srctree)/techpack -maxdepth 1 -mindepth 1 -type d -not -name ".*")
+header-y += $(addsuffix /include/uapi/,$(subst $(srctree)/techpack/,,$(techpack-header-dirs)))
diff --git a/techpack/stub/Makefile b/techpack/stub/Makefile
new file mode 100644
index 0000000..184b5c7
--- /dev/null
+++ b/techpack/stub/Makefile
@@ -0,0 +1,2 @@
+ccflags-y := -Wno-unused-function
+obj-y := stub.o
diff --git a/techpack/stub/include/uapi/Kbuild b/techpack/stub/include/uapi/Kbuild
new file mode 100644
index 0000000..87bfa65
--- /dev/null
+++ b/techpack/stub/include/uapi/Kbuild
@@ -0,0 +1 @@
+#Stub place holder
diff --git a/techpack/stub/stub.c b/techpack/stub/stub.c
new file mode 100644
index 0000000..6024341
--- /dev/null
+++ b/techpack/stub/stub.c
@@ -0,0 +1,3 @@
+static void _techpack_stub(void)
+{
+}
diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
index c808c7d..d302142 100644
--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
+++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
@@ -8,7 +8,7 @@
 #include <linux/utsname.h>
 #include <linux/compiler.h>
 
-#define MAX_LOCK_DEPTH 2000UL
+#define MAX_LOCK_DEPTH 63UL
 
 #define asmlinkage
 #define __visible
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 3eb3edb..a130901 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -702,7 +702,7 @@
 		ui_browser__gotorc(browser, row, column + 1);
 		SLsmg_draw_hline(2);
 
-		if (row++ == 0)
+		if (++row == 0)
 			goto out;
 	} else
 		row = 0;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 04387ab..7e27207 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -64,6 +64,25 @@
 	INTEL_PT_STATE_FUP_NO_TIP,
 };
 
+static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
+{
+	switch (pkt_state) {
+	case INTEL_PT_STATE_NO_PSB:
+	case INTEL_PT_STATE_NO_IP:
+	case INTEL_PT_STATE_ERR_RESYNC:
+	case INTEL_PT_STATE_IN_SYNC:
+	case INTEL_PT_STATE_TNT:
+		return true;
+	case INTEL_PT_STATE_TIP:
+	case INTEL_PT_STATE_TIP_PGD:
+	case INTEL_PT_STATE_FUP:
+	case INTEL_PT_STATE_FUP_NO_TIP:
+		return false;
+	default:
+		return true;
+	};
+}
+
 #ifdef INTEL_PT_STRICT
 #define INTEL_PT_STATE_ERR1	INTEL_PT_STATE_NO_PSB
 #define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_PSB
@@ -92,6 +111,7 @@
 	bool have_tma;
 	bool have_cyc;
 	bool fixup_last_mtc;
+	bool have_last_ip;
 	uint64_t pos;
 	uint64_t last_ip;
 	uint64_t ip;
@@ -99,6 +119,7 @@
 	uint64_t timestamp;
 	uint64_t tsc_timestamp;
 	uint64_t ref_timestamp;
+	uint64_t sample_timestamp;
 	uint64_t ret_addr;
 	uint64_t ctc_timestamp;
 	uint64_t ctc_delta;
@@ -139,6 +160,7 @@
 	unsigned int fup_tx_flags;
 	unsigned int tx_flags;
 	uint64_t timestamp_insn_cnt;
+	uint64_t sample_insn_cnt;
 	uint64_t stuck_ip;
 	int no_progress;
 	int stuck_ip_prd;
@@ -398,6 +420,7 @@
 static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
 {
 	decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
+	decoder->have_last_ip = true;
 }
 
 static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@@ -898,6 +921,7 @@
 
 	decoder->tot_insn_cnt += insn_cnt;
 	decoder->timestamp_insn_cnt += insn_cnt;
+	decoder->sample_insn_cnt += insn_cnt;
 	decoder->period_insn_cnt += insn_cnt;
 
 	if (err) {
@@ -1444,7 +1468,8 @@
 
 		case INTEL_PT_FUP:
 			decoder->pge = true;
-			intel_pt_set_last_ip(decoder);
+			if (decoder->packet.count)
+				intel_pt_set_last_ip(decoder);
 			break;
 
 		case INTEL_PT_MODE_TSX:
@@ -1648,6 +1673,8 @@
 			break;
 
 		case INTEL_PT_PSB:
+			decoder->last_ip = 0;
+			decoder->have_last_ip = true;
 			intel_pt_clear_stack(&decoder->stack);
 			err = intel_pt_walk_psbend(decoder);
 			if (err == -EAGAIN)
@@ -1728,8 +1755,9 @@
 
 static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
 {
-	return decoder->last_ip || decoder->packet.count == 0 ||
-	       decoder->packet.count == 3 || decoder->packet.count == 6;
+	return decoder->packet.count &&
+	       (decoder->have_last_ip || decoder->packet.count == 3 ||
+		decoder->packet.count == 6);
 }
 
 /* Walk PSB+ packets to get in sync. */
@@ -1852,14 +1880,10 @@
 			break;
 
 		case INTEL_PT_FUP:
-			if (decoder->overflow) {
-				if (intel_pt_have_ip(decoder))
-					intel_pt_set_ip(decoder);
-				if (decoder->ip)
-					return 0;
-			}
-			if (decoder->packet.count)
-				intel_pt_set_last_ip(decoder);
+			if (intel_pt_have_ip(decoder))
+				intel_pt_set_ip(decoder);
+			if (decoder->ip)
+				return 0;
 			break;
 
 		case INTEL_PT_MTC:
@@ -1908,6 +1932,9 @@
 			break;
 
 		case INTEL_PT_PSB:
+			decoder->last_ip = 0;
+			decoder->have_last_ip = true;
+			intel_pt_clear_stack(&decoder->stack);
 			err = intel_pt_walk_psb(decoder);
 			if (err)
 				return err;
@@ -1933,6 +1960,8 @@
 {
 	int err;
 
+	decoder->set_fup_tx_flags = false;
+
 	intel_pt_log("Scanning for full IP\n");
 	err = intel_pt_walk_to_ip(decoder);
 	if (err)
@@ -2041,6 +2070,7 @@
 
 	decoder->pge = false;
 	decoder->continuous_period = false;
+	decoder->have_last_ip = false;
 	decoder->last_ip = 0;
 	decoder->ip = 0;
 	intel_pt_clear_stack(&decoder->stack);
@@ -2049,6 +2079,7 @@
 	if (err)
 		return err;
 
+	decoder->have_last_ip = true;
 	decoder->pkt_state = INTEL_PT_STATE_NO_IP;
 
 	err = intel_pt_walk_psb(decoder);
@@ -2067,7 +2098,7 @@
 
 static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
 {
-	uint64_t est = decoder->timestamp_insn_cnt << 1;
+	uint64_t est = decoder->sample_insn_cnt << 1;
 
 	if (!decoder->cbr || !decoder->max_non_turbo_ratio)
 		goto out;
@@ -2075,7 +2106,7 @@
 	est *= decoder->max_non_turbo_ratio;
 	est /= decoder->cbr;
 out:
-	return decoder->timestamp + est;
+	return decoder->sample_timestamp + est;
 }
 
 const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
@@ -2091,7 +2122,9 @@
 			err = intel_pt_sync(decoder);
 			break;
 		case INTEL_PT_STATE_NO_IP:
+			decoder->have_last_ip = false;
 			decoder->last_ip = 0;
+			decoder->ip = 0;
 			/* Fall through */
 		case INTEL_PT_STATE_ERR_RESYNC:
 			err = intel_pt_sync_ip(decoder);
@@ -2128,15 +2161,24 @@
 		}
 	} while (err == -ENOLINK);
 
-	decoder->state.err = err ? intel_pt_ext_err(err) : 0;
-	decoder->state.timestamp = decoder->timestamp;
+	if (err) {
+		decoder->state.err = intel_pt_ext_err(err);
+		decoder->state.from_ip = decoder->ip;
+		decoder->sample_timestamp = decoder->timestamp;
+		decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+	} else {
+		decoder->state.err = 0;
+		if (intel_pt_sample_time(decoder->pkt_state)) {
+			decoder->sample_timestamp = decoder->timestamp;
+			decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+		}
+	}
+
+	decoder->state.timestamp = decoder->sample_timestamp;
 	decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
 	decoder->state.cr3 = decoder->cr3;
 	decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
 
-	if (err)
-		decoder->state.from_ip = decoder->ip;
-
 	return &decoder->state;
 }
 
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 10a21a9..763f37f 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -138,9 +138,6 @@
 
 	if (chdir(cwd) != 0)
 		err(1, "chdir to private tmpfs");
-
-	if (umount2(".", MNT_DETACH) != 0)
-		err(1, "detach private tmpfs");
 }
 
 static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@
 			err(1, "chown");
 		if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
 			err(1, "chmod");
-}
+	}
 
 	capng_get_caps_process();
 
@@ -384,7 +381,7 @@
 	} else {
 		printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
 		exec_other_validate_cap("./validate_cap_sgidnonroot",
-						false, false, true, false);
+					false, false, true, false);
 
 		if (fork_wait()) {
 			printf("[RUN]\tNon-root +ia, sgidroot => i\n");
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 1dd087d..111e09c 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -47,6 +47,22 @@
 	return vfio_group;
 }
 
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+					       struct file *filep)
+{
+	bool ret, (*fn)(struct vfio_group *, struct file *);
+
+	fn = symbol_get(vfio_external_group_match_file);
+	if (!fn)
+		return false;
+
+	ret = fn(group, filep);
+
+	symbol_put(vfio_external_group_match_file);
+
+	return ret;
+}
+
 static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
 {
 	void (*fn)(struct vfio_group *);
@@ -171,18 +187,13 @@
 		if (!f.file)
 			return -EBADF;
 
-		vfio_group = kvm_vfio_group_get_external_user(f.file);
-		fdput(f);
-
-		if (IS_ERR(vfio_group))
-			return PTR_ERR(vfio_group);
-
 		ret = -ENOENT;
 
 		mutex_lock(&kv->lock);
 
 		list_for_each_entry(kvg, &kv->group_list, node) {
-			if (kvg->vfio_group != vfio_group)
+			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+								f.file))
 				continue;
 
 			list_del(&kvg->node);
@@ -196,7 +207,7 @@
 
 		mutex_unlock(&kv->lock);
 
-		kvm_vfio_group_put_external_user(vfio_group);
+		fdput(f);
 
 		kvm_vfio_update_coherency(dev);