Merge "usb: defconfig: Enable USB related function drivers on msm8953"
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
index f90bd7f..e51d54b 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-audio-overlay.dtsi
@@ -17,7 +17,6 @@
 
 &snd_934x {
 	qcom,audio-routing =
-		"AIF4 VI", "MCLK",
 		"RX_BIAS", "MCLK",
 		"MADINPUT", "MCLK",
 		"AMIC2", "MIC BIAS2",
@@ -28,8 +27,6 @@
 		"MIC BIAS2", "ANCLeft Headset Mic",
 		"AMIC5", "MIC BIAS3",
 		"MIC BIAS3", "Handset Mic",
-		"DMIC0", "MIC BIAS1",
-		"MIC BIAS1", "Digital Mic0",
 		"DMIC1", "MIC BIAS1",
 		"MIC BIAS1", "Digital Mic1",
 		"DMIC2", "MIC BIAS3",
@@ -40,14 +37,13 @@
 		"MIC BIAS4", "Digital Mic4",
 		"DMIC5", "MIC BIAS4",
 		"MIC BIAS4", "Digital Mic5",
-		"SpkrLeft IN", "SPK1 OUT",
 		"SpkrRight IN", "SPK2 OUT";
 
 	qcom,msm-mbhc-hphl-swh = <1>;
 	qcom,msm-mbhc-gnd-swh = <1>;
 	qcom,msm-mbhc-hs-mic-max-threshold-mv = <1700>;
 	qcom,msm-mbhc-hs-mic-min-threshold-mv = <50>;
-	qcom,tavil-mclk-clk-freq = <12288000>;
+	qcom,tavil-mclk-clk-freq = <9600000>;
 
 	asoc-codec = <&stub_codec>;
 	asoc-codec-names = "msm-stub-codec.1";
@@ -66,23 +62,24 @@
 		interrupt-controller;
 		#interrupt-cells = <1>;
 		interrupt-parent = <&tlmm>;
-		qcom,gpio-connect = <&tlmm 71 0>;
+		qcom,gpio-connect = <&tlmm 90 0>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&wcd_intr_default>;
 	};
 
 	clock_audio_up: audio_ext_clk_up {
 		compatible = "qcom,audio-ref-clk";
-		qcom,codec-mclk-clk-freq = <12288000>;
+		qcom,audio-ref-clk-gpio = <&tlmm 62 0>;
+		qcom,codec-mclk-clk-freq = <9600000>;
 		pinctrl-names = "sleep", "active";
 		pinctrl-0 = <&i2s_mclk_sleep>;
 		pinctrl-1 = <&i2s_mclk_active>;
 		#clock-cells = <1>;
 	};
 
-	wcd_rst_gpio: msm_cdc_pinctrl@77 {
+	wcd_rst_gpio: msm_cdc_pinctrl@86 {
 		compatible = "qcom,msm-cdc-pinctrl";
-		qcom,cdc-rst-n-gpio = <&tlmm 77 0>;
+		qcom,cdc-rst-n-gpio = <&tlmm 86 0>;
 		pinctrl-names = "aud_active", "aud_sleep";
 		pinctrl-0 = <&cdc_reset_active>;
 		pinctrl-1 = <&cdc_reset_sleep>;
@@ -91,8 +88,8 @@
 
 &i2c_3 {
 	wcd934x_cdc: tavil_codec {
-		compatible = "qcom,tavil-i2c-pgd";
-		elemental-addr = [00 01 50 02 17 02];
+		compatible = "qcom,tavil-i2c";
+		reg = <0x0d>;
 
 		interrupt-parent = <&wcd9xxx_intc>;
 		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
@@ -135,7 +132,7 @@
 		qcom,cdc-micbias3-mv = <1800>;
 		qcom,cdc-micbias4-mv = <1800>;
 
-		qcom,cdc-mclk-clk-rate = <12288000>;
+		qcom,cdc-mclk-clk-rate = <9600000>;
 		qcom,cdc-dmic-sample-rate = <4800000>;
 
 		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
index 4fe2d1e..13e1fc3 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-blsp.dtsi
@@ -103,7 +103,6 @@
 		pinctrl-names = "i2c_active", "i2c_sleep";
 		pinctrl-0 = <&i2c_3_active>;
 		pinctrl-1 = <&i2c_3_sleep>;
-		status = "disabled";
 	};
 
 	i2c_4: i2c@838000 { /* BLSP1 QUP4: GPIO: 76,77 */
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
index 89945e3..94ccf9c 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-cdp.dts
@@ -15,6 +15,7 @@
 
 #include "sdxpoorwills.dtsi"
 #include "sdxpoorwills-pinctrl.dtsi"
+#include "sdxpoorwills-cdp-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDXPOORWILLS CDP";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
index afc88969..a09b149 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-ion.dtsi
@@ -20,5 +20,11 @@
 			reg = <25>;
 			qcom,ion-heap-type = "SYSTEM";
 		};
+
+		qcom,ion-heap@28 { /* AUDIO HEAP */
+			reg = <28>;
+			memory-region = <&audio_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index 9b8e751..82b65e2 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -947,12 +947,12 @@
 		wcd9xxx_intr {
 			wcd_intr_default: wcd_intr_default{
 				mux {
-					pins = "gpio71";
+					pins = "gpio90";
 					function = "gpio";
 				};
 
 				config {
-					pins = "gpio71";
+					pins = "gpio90";
 					drive-strength = <2>; /* 2 mA */
 					bias-pull-down; /* pull down */
 					input-enable;
@@ -963,11 +963,11 @@
 		cdc_reset_ctrl {
 			cdc_reset_sleep: cdc_reset_sleep {
 				mux {
-					pins = "gpio77";
+					pins = "gpio86";
 					function = "gpio";
 				};
 				config {
-					pins = "gpio77";
+					pins = "gpio86";
 					drive-strength = <2>;
 					bias-disable;
 					output-low;
@@ -976,11 +976,11 @@
 
 			cdc_reset_active:cdc_reset_active {
 				mux {
-					pins = "gpio77";
+					pins = "gpio86";
 					function = "gpio";
 				};
 				config {
-					pins = "gpio77";
+					pins = "gpio86";
 					drive-strength = <8>;
 					bias-pull-down;
 					output-high;
@@ -1063,7 +1063,7 @@
 			pri_ws_active_master: pri_ws_active_master {
 				mux {
 					pins = "gpio12";
-					function = "pri_mi2s_ws_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1077,7 +1077,7 @@
 			pri_sck_active_master: pri_sck_active_master {
 				mux {
 					pins = "gpio15";
-					function = "pri_mi2s_sck_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1091,7 +1091,7 @@
 			pri_ws_active_slave: pri_ws_active_slave {
 				mux {
 					pins = "gpio12";
-					function = "pri_mi2s_ws_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1104,7 +1104,7 @@
 			pri_sck_active_slave: pri_sck_active_slave {
 				mux {
 					pins = "gpio15";
-					function = "pri_mi2s_sck_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1117,7 +1117,7 @@
 			pri_dout_active: pri_dout_active {
 				mux {
 					pins = "gpio14";
-					function = "pri_mi2s_data1_a";
+					function = "pri_mi2s";
 				};
 
 				config {
@@ -1147,7 +1147,7 @@
 			pri_din_active: pri_din_active {
 				mux {
 					pins = "gpio13";
-					function = "pri_mi2s_data0_a";
+					function = "pri_mi2s";
 				};
 
 				config {
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 5a7e17e..eaa15ce 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -126,6 +126,7 @@
 		sda670-pm660a-mtp-overlay.dtbo \
 		qcs605-cdp-overlay.dtbo \
 		qcs605-mtp-overlay.dtbo \
+		qcs605-360camera-overlay.dtbo \
 		qcs605-external-codec-mtp-overlay.dtbo \
 		qcs605-lc-mtp-overlay.dtbo
 
@@ -156,6 +157,7 @@
 qcs605-mtp-overlay.dtbo-base := qcs605.dtb
 qcs605-external-codec-mtp-overlay.dtbo-base := qcs605.dtb
 qcs605-lc-mtp-overlay.dtbo-base := qcs605.dtb
+qcs605-360camera-overlay.dtbo-base := qcs605.dtb
 
 else
 dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
new file mode 100644
index 0000000..e794472
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-8953.dtsi
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/msm-clocks-8953.h>
+
+&soc {
+	kgsl_smmu: arm,smmu-kgsl@1c40000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		qcom,tz-device-id = "GPU";
+		reg = <0x1c40000 0x10000>;
+		#iommu-cells = <1>;
+		#global-interrupts = <0>;
+		interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,dynamic;
+		qcom,use-3-lvl-tables;
+		qcom,enable-static-cb;
+		qcom,enable-smmu-halt;
+		qcom,skip-init;
+		vdd-supply = <&gdsc_oxili_cx>;
+		qcom,regulator-names = "vdd";
+		clocks = <&clock_gcc_gfx clk_gcc_oxili_ahb_clk>,
+			<&clock_gcc_gfx clk_gcc_bimc_gfx_clk>;
+		clock-names = "gpu_ahb_clk", "gcc_bimc_gfx_clk";
+	};
+
+	/* A test device to exercise the SMMU operation */
+	kgsl_iommu_test_device0 {
+		compatible = "iommu-debug-test";
+		/* The SID should be a valid one to get the proper
+		 * SMR, S2CR indices.
+		 */
+		iommus = <&kgsl_smmu 0x0>;
+	};
+
+	apps_iommu: qcom,iommu@1e00000 {
+		status = "disabled";
+		compatible = "qcom,qsmmu-v500";
+		reg = <0x1e00000 0x40000>,
+			<0x1ee2000 0x20>;
+		reg-names = "base", "tcu-base";
+		#iommu-cells = <2>;
+		qcom,tz-device-id = "APPS";
+		qcom,skip-init;
+		qcom,enable-static-cb;
+		qcom,use-3-lvl-tables;
+		qcom,disable-atos;
+		#global-interrupts = <0>;
+		#size-cells = <1>;
+		#address-cells = <1>;
+		ranges;
+		interrupts = <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clock_gcc clk_gcc_smmu_cfg_clk>,
+			  <&clock_gcc clk_gcc_apss_tcu_async_clk>;
+		clock-names = "iface_clk", "core_clk";
+	};
+};
+
+#include "msm-arm-smmu-impl-defs-8953.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi
new file mode 100644
index 0000000..2122db9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-impl-defs-8953.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&kgsl_smmu {
+	attach-impl-defs = <0x6000 0x270>,
+		<0x6060 0x1055>,
+		<0x6800 0x6>,
+		<0x6900 0x3ff>,
+		<0x6924 0x204>,
+		<0x6928 0x10800>,
+		<0x6930 0x400>,
+		<0x6960 0xffffffff>,
+		<0x6b64 0xa0000>,
+		<0x6b68 0xaaab92a>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi b/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi
new file mode 100644
index 0000000..34004b0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-ion.dtsi
@@ -0,0 +1,36 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		qcom,ion-heap@8 { /* CP_MM HEAP */
+			reg = <8>;
+			memory-region = <&secure_mem>;
+			qcom,ion-heap-type = "SECURE_DMA";
+		};
+
+		qcom,ion-heap@27 { /* QSEECOM HEAP */
+			reg = <27>;
+			memory-region = <&qseecom_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 76a703a..af21543 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -134,6 +134,8 @@
 #include "msm8953-pm.dtsi"
 #include "msm8953-bus.dtsi"
 #include "msm8953-coresight.dtsi"
+#include "msm8953-ion.dtsi"
+#include "msm-arm-smmu-8953.dtsi"
 
 &soc {
 	#address-cells = <1>;
@@ -522,13 +524,13 @@
 		clock-names = "core_clk_src", "core_clk",
 				"iface_clk", "bus_clk";
 		qcom,ce-opp-freq = <100000000>;
-		status = "disabled";
+		status = "okay";
 	};
 
 	qcom_tzlog: tz-log@08600720 {
 		compatible = "qcom,tz-log";
 		reg = <0x08600720 0x2000>;
-		status = "disabled";
+		status = "okay";
 	};
 
 	qcom_rng: qrng@e3000 {
@@ -544,7 +546,7 @@
 			<1 618 0 800>;          /* 100 MB/s */
 		clocks = <&clock_gcc clk_gcc_prng_ahb_clk>;
 		clock-names = "iface_clk";
-		status = "disabled";
+		status = "okay";
 	};
 
 	qcom_crypto: qcrypto@720000 {
@@ -577,7 +579,7 @@
 		qcom,use-sw-hmac-algo;
 		qcom,use-sw-aead-algo;
 		qcom,ce-opp-freq = <100000000>;
-		status = "disabled";
+		status = "okay";
 	};
 
 	qcom_cedev: qcedev@720000 {
@@ -603,7 +605,7 @@
 		clock-names = "core_clk_src", "core_clk",
 				"iface_clk", "bus_clk";
 		qcom,ce-opp-freq = <100000000>;
-		status = "disabled";
+		status = "okay";
 	};
 
 	blsp1_uart0: serial@78af000 {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
new file mode 100644
index 0000000..e7a2197
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera-overlay.dts
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "qcs605-360camera.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 PM660+PM660L 360camera";
+	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+	qcom,msm-id = <347 0x0>;
+	qcom,board-id = <0x0000000b 1>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
index 18b0cd8..c40fff6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera-sensor-360camera.dtsi
@@ -11,247 +11,245 @@
  * GNU General Public License for more details.
  */
 
-&soc {
-	led_flash_rear: qcom,camera-flash@0 {
-		cell-index = <0>;
-		reg = <0x00 0x00>;
-		compatible = "qcom,camera-flash";
-		flash-source = <&pm660l_flash0 &pm660l_flash1>;
-		torch-source = <&pm660l_torch0 &pm660l_torch1>;
-		switch-source = <&pm660l_switch0>;
-		status = "ok";
-	};
+&led_flash_rear {
+	cell-index = <0>;
+	reg = <0x00 0x00>;
+	compatible = "qcom,camera-flash";
+	flash-source = <&pm660l_flash0 &pm660l_flash1>;
+	torch-source = <&pm660l_torch0 &pm660l_torch1>;
+	switch-source = <&pm660l_switch0>;
+	status = "ok";
+};
 
-	led_flash_front: qcom,camera-flash@1 {
-		cell-index = <1>;
-		reg = <0x01 0x00>;
-		compatible = "qcom,camera-flash";
-		flash-source = <&pm660l_flash2>;
-		torch-source = <&pm660l_torch2>;
-		switch-source = <&pm660l_switch1>;
-		status = "ok";
-	};
+&led_flash_front {
+	cell-index = <1>;
+	reg = <0x01 0x00>;
+	compatible = "qcom,camera-flash";
+	flash-source = <&pm660l_flash2>;
+	torch-source = <&pm660l_torch2>;
+	switch-source = <&pm660l_switch1>;
+	status = "ok";
+};
 
-	actuator_regulator: gpio-regulator@0 {
-		compatible = "regulator-fixed";
-		reg = <0x00 0x00>;
-		regulator-name = "actuator_regulator";
-		regulator-min-microvolt = <2800000>;
-		regulator-max-microvolt = <2800000>;
-		regulator-enable-ramp-delay = <100>;
-		enable-active-high;
-		gpio = <&tlmm 27 0>;
-	};
+&actuator_regulator {
+	compatible = "regulator-fixed";
+	reg = <0x00 0x00>;
+	regulator-name = "actuator_regulator";
+	regulator-min-microvolt = <2800000>;
+	regulator-max-microvolt = <2800000>;
+	regulator-enable-ramp-delay = <100>;
+	enable-active-high;
+	gpio = <&tlmm 27 0>;
+};
 
-	camera_ldo: gpio-regulator@2 {
-		compatible = "regulator-fixed";
-		reg = <0x02 0x00>;
-		regulator-name = "camera_ldo";
-		regulator-min-microvolt = <1352000>;
-		regulator-max-microvolt = <1352000>;
-		regulator-enable-ramp-delay = <233>;
-		enable-active-high;
-		gpio = <&pm660l_gpios 4 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&camera_dvdd_en_default>;
-		vin-supply = <&pm660_s6>;
-	};
+&camera_ldo {
+	compatible = "regulator-fixed";
+	reg = <0x02 0x00>;
+	regulator-name = "camera_ldo";
+	regulator-min-microvolt = <1352000>;
+	regulator-max-microvolt = <1352000>;
+	regulator-enable-ramp-delay = <233>;
+	enable-active-high;
+	gpio = <&pm660l_gpios 4 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&camera_dvdd_en_default>;
+	vin-supply = <&pm660_s6>;
+};
 
-	camera_rear_ldo: gpio-regulator@1 {
-		compatible = "regulator-fixed";
-		reg = <0x01 0x00>;
-		regulator-name = "camera_rear_ldo";
-		regulator-min-microvolt = <1352000>;
-		regulator-max-microvolt = <1352000>;
-		regulator-enable-ramp-delay = <135>;
-		enable-active-high;
-		gpio = <&pm660l_gpios 4 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&camera_rear_dvdd_en_default>;
-		vin-supply = <&pm660_s6>;
-	};
+&camera_rear_ldo {
+	compatible = "regulator-fixed";
+	reg = <0x01 0x00>;
+	regulator-name = "camera_rear_ldo";
+	regulator-min-microvolt = <1352000>;
+	regulator-max-microvolt = <1352000>;
+	regulator-enable-ramp-delay = <135>;
+	enable-active-high;
+	gpio = <&pm660l_gpios 4 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&camera_rear_dvdd_en_default>;
+	vin-supply = <&pm660_s6>;
+};
 
-	camera_vio_ldo: gpio-regulator@3 {
-		compatible = "regulator-fixed";
-		reg = <0x03 0x00>;
-		regulator-name = "camera_vio_ldo";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		regulator-enable-ramp-delay = <233>;
-		enable-active-high;
-		gpio = <&tlmm 29 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&cam_sensor_rear_vio>;
-		vin-supply = <&pm660_s4>;
-	};
+&camera_vio_ldo {
+	compatible = "regulator-fixed";
+	reg = <0x03 0x00>;
+	regulator-name = "camera_vio_ldo";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+	regulator-enable-ramp-delay = <233>;
+	enable-active-high;
+	gpio = <&tlmm 29 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&cam_sensor_rear_vio>;
+	vin-supply = <&pm660_s4>;
+};
 
-	camera_vana_ldo: gpio-regulator@4 {
-		compatible = "regulator-fixed";
-		reg = <0x04 0x00>;
-		regulator-name = "camera_vana_ldo";
-		regulator-min-microvolt = <2850000>;
-		regulator-max-microvolt = <2850000>;
-		regulator-enable-ramp-delay = <233>;
-		enable-active-high;
-		gpio = <&tlmm 8 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&cam_sensor_rear_vana>;
-		vin-supply = <&pm660l_bob>;
-	};
+&camera_vana_ldo {
+	compatible = "regulator-fixed";
+	reg = <0x04 0x00>;
+	regulator-name = "camera_vana_ldo";
+	regulator-min-microvolt = <2850000>;
+	regulator-max-microvolt = <2850000>;
+	regulator-enable-ramp-delay = <233>;
+	enable-active-high;
+	gpio = <&tlmm 8 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&cam_sensor_rear_vana>;
+	vin-supply = <&pm660l_bob>;
+};
+
+&actuator_rear {
+	cell-index = <0>;
+	reg = <0x0>;
+	compatible = "qcom,actuator";
+	cci-master = <0>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <2800000>;
+	rgltr-max-voltage = <2800000>;
+	rgltr-load-current = <0>;
+};
+
+&actuator_front {
+	cell-index = <1>;
+	reg = <0x1>;
+	compatible = "qcom,actuator";
+	cci-master = <1>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <2800000>;
+	rgltr-max-voltage = <2800000>;
+	rgltr-load-current = <0>;
+};
+
+&ois_rear {
+	cell-index = <0>;
+	reg = <0x0>;
+	compatible = "qcom,ois";
+	cci-master = <0>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <2800000>;
+	rgltr-max-voltage = <2800000>;
+	rgltr-load-current = <0>;
+	status = "disabled";
+};
+
+&eeprom_rear {
+	cell-index = <0>;
+	reg = <0>;
+	compatible = "qcom,eeprom";
+	cam_vio-supply = <&camera_vio_ldo>;
+	cam_vana-supply = <&camera_vana_ldo>;
+	cam_vdig-supply = <&camera_rear_ldo>;
+	cam_clk-supply = <&titan_top_gdsc>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+		"cam_clk", "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+	rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+	rgltr-load-current = <0 80000 105000 0 0>;
+	gpio-no-mux = <0>;
+	pinctrl-names = "cam_default", "cam_suspend";
+	pinctrl-0 = <&cam_sensor_mclk0_active
+			&cam_sensor_rear_active>;
+	pinctrl-1 = <&cam_sensor_mclk0_suspend
+			&cam_sensor_rear_suspend>;
+	gpios = <&tlmm 13 0>,
+		<&tlmm 30 0>;
+	gpio-reset = <1>;
+	gpio-req-tbl-num = <0 1>;
+	gpio-req-tbl-flags = <1 0>;
+	gpio-req-tbl-label = "CAMIF_MCLK0",
+				"CAM_RESET0";
+	sensor-mode = <0>;
+	cci-master = <0>;
+	status = "ok";
+	clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+	clock-names = "cam_clk";
+	clock-cntl-level = "turbo";
+	clock-rates = <24000000>;
+};
+
+&eeprom_rear_aux {
+	cell-index = <1>;
+	reg = <0x1>;
+	compatible = "qcom,eeprom";
+	cam_vio-supply = <&camera_vio_ldo>;
+	cam_vana-supply = <&camera_vana_ldo>;
+	cam_vdig-supply = <&camera_ldo>;
+	cam_clk-supply = <&titan_top_gdsc>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+		"cam_clk", "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
+	rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
+	rgltr-load-current = <105000 0 80000 0>;
+	gpio-no-mux = <0>;
+	pinctrl-names = "cam_default", "cam_suspend";
+	pinctrl-0 = <&cam_sensor_mclk1_active
+			&cam_sensor_rear2_active>;
+	pinctrl-1 = <&cam_sensor_mclk1_suspend
+			&cam_sensor_rear2_suspend>;
+	gpios = <&tlmm 14 0>,
+		<&tlmm 28 0>;
+	gpio-reset = <1>;
+	gpio-req-tbl-num = <0 1>;
+	gpio-req-tbl-flags = <1 0>;
+	gpio-req-tbl-label = "CAMIF_MCLK1",
+				"CAM_RESET1";
+	sensor-position = <0>;
+	sensor-mode = <0>;
+	cci-master = <1>;
+	status = "ok";
+	clock-names = "cam_clk";
+	clock-cntl-level = "turbo";
+	clock-rates = <24000000>;
+};
+
+&eeprom_front {
+	cell-index = <2>;
+	reg = <0x2>;
+	compatible = "qcom,eeprom";
+	cam_vio-supply = <&camera_vio_ldo>;
+	cam_vana-supply = <&camera_vana_ldo>;
+	cam_vdig-supply = <&camera_ldo>;
+	cam_clk-supply = <&titan_top_gdsc>;
+	cam_vaf-supply = <&actuator_regulator>;
+	regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+		"cam_clk", "cam_vaf";
+	rgltr-cntrl-support;
+	rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
+	rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
+	rgltr-load-current = <0 80000 105000 0>;
+	gpio-no-mux = <0>;
+	pinctrl-names = "cam_default", "cam_suspend";
+	pinctrl-0 = <&cam_sensor_mclk2_active
+			 &cam_sensor_front_active>;
+	pinctrl-1 = <&cam_sensor_mclk2_suspend
+			 &cam_sensor_front_suspend>;
+	gpios = <&tlmm 15 0>,
+		<&tlmm 9 0>;
+	gpio-reset = <1>;
+	gpio-req-tbl-num = <0 1>;
+	gpio-req-tbl-flags = <1 0>;
+	gpio-req-tbl-label = "CAMIF_MCLK2",
+				"CAM_RESET2";
+	sensor-mode = <0>;
+	cci-master = <1>;
+	status = "ok";
+	clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+	clock-names = "cam_clk";
+	clock-cntl-level = "turbo";
+	clock-rates = <24000000>;
 };
 
 &cam_cci {
-	actuator_rear: qcom,actuator@0 {
-		cell-index = <0>;
-		reg = <0x0>;
-		compatible = "qcom,actuator";
-		cci-master = <0>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <2800000>;
-		rgltr-max-voltage = <2800000>;
-		rgltr-load-current = <0>;
-	};
-
-	actuator_front: qcom,actuator@1 {
-		cell-index = <1>;
-		reg = <0x1>;
-		compatible = "qcom,actuator";
-		cci-master = <1>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <2800000>;
-		rgltr-max-voltage = <2800000>;
-		rgltr-load-current = <0>;
-	};
-
-	ois_rear: qcom,ois@0 {
-		cell-index = <0>;
-		reg = <0x0>;
-		compatible = "qcom,ois";
-		cci-master = <0>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <2800000>;
-		rgltr-max-voltage = <2800000>;
-		rgltr-load-current = <0>;
-		status = "disabled";
-	};
-
-	eeprom_rear: qcom,eeprom@0 {
-		cell-index = <0>;
-		reg = <0>;
-		compatible = "qcom,eeprom";
-		cam_vio-supply = <&camera_vio_ldo>;
-		cam_vana-supply = <&camera_vana_ldo>;
-		cam_vdig-supply = <&camera_rear_ldo>;
-		cam_clk-supply = <&titan_top_gdsc>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk", "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
-		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
-		rgltr-load-current = <0 80000 105000 0 0>;
-		gpio-no-mux = <0>;
-		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk0_active
-				&cam_sensor_rear_active>;
-		pinctrl-1 = <&cam_sensor_mclk0_suspend
-				&cam_sensor_rear_suspend>;
-		gpios = <&tlmm 13 0>,
-			<&tlmm 30 0>;
-		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1>;
-		gpio-req-tbl-flags = <1 0>;
-		gpio-req-tbl-label = "CAMIF_MCLK0",
-					"CAM_RESET0";
-		sensor-mode = <0>;
-		cci-master = <0>;
-		status = "ok";
-		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
-		clock-names = "cam_clk";
-		clock-cntl-level = "turbo";
-		clock-rates = <24000000>;
-	};
-
-	eeprom_rear_aux: qcom,eeprom@1 {
-		cell-index = <1>;
-		reg = <0x1>;
-		compatible = "qcom,eeprom";
-		cam_vio-supply = <&camera_vio_ldo>;
-		cam_vana-supply = <&camera_vana_ldo>;
-		cam_vdig-supply = <&camera_ldo>;
-		cam_clk-supply = <&titan_top_gdsc>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
-			"cam_clk", "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <1352000 1800000 2850000 0 2800000>;
-		rgltr-max-voltage = <1352000 1800000 2850000 0 2800000>;
-		rgltr-load-current = <105000 0 80000 0>;
-		gpio-no-mux = <0>;
-		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk1_active
-				&cam_sensor_rear2_active>;
-		pinctrl-1 = <&cam_sensor_mclk1_suspend
-				&cam_sensor_rear2_suspend>;
-		gpios = <&tlmm 14 0>,
-			<&tlmm 28 0>;
-		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1>;
-		gpio-req-tbl-flags = <1 0>;
-		gpio-req-tbl-label = "CAMIF_MCLK1",
-					"CAM_RESET1";
-		sensor-position = <0>;
-		sensor-mode = <0>;
-		cci-master = <1>;
-		status = "ok";
-		clock-names = "cam_clk";
-		clock-cntl-level = "turbo";
-		clock-rates = <24000000>;
-	};
-
-	eeprom_front: qcom,eeprom@2 {
-		cell-index = <2>;
-		reg = <0x2>;
-		compatible = "qcom,eeprom";
-		cam_vio-supply = <&camera_vio_ldo>;
-		cam_vana-supply = <&camera_vana_ldo>;
-		cam_vdig-supply = <&camera_ldo>;
-		cam_clk-supply = <&titan_top_gdsc>;
-		cam_vaf-supply = <&actuator_regulator>;
-		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk", "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <1800000 2850000 1352000 0 2800000>;
-		rgltr-max-voltage = <1800000 2850000 1352000 0 2800000>;
-		rgltr-load-current = <0 80000 105000 0>;
-		gpio-no-mux = <0>;
-		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_front_active>;
-		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_front_suspend>;
-		gpios = <&tlmm 15 0>,
-			<&tlmm 9 0>;
-		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1>;
-		gpio-req-tbl-flags = <1 0>;
-		gpio-req-tbl-label = "CAMIF_MCLK2",
-					"CAM_RESET2";
-		sensor-mode = <0>;
-		cci-master = <1>;
-		status = "ok";
-		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
-		clock-names = "cam_clk";
-		clock-cntl-level = "turbo";
-		clock-rates = <24000000>;
-	};
-
 	qcom,cam-sensor@0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
index 61ef7ff..46de412 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
@@ -21,12 +21,6 @@
 			qcom,ion-heap-type = "SYSTEM";
 		};
 
-		qcom,ion-heap@22 { /* ADSP HEAP */
-			reg = <22>;
-			memory-region = <&adsp_mem>;
-			qcom,ion-heap-type = "DMA";
-		};
-
 		qcom,ion-heap@27 { /* QSEECOM HEAP */
 			reg = <27>;
 			memory-region = <&qseecom_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index de125e2..7e426cf 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -625,15 +625,35 @@
 };
 
 &dsi_sim_cmd {
-	qcom,mdss-dsi-t-clk-post = <0x0d>;
-	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-t-clk-post = <0x0c>;
+	qcom,mdss-dsi-t-clk-pre = <0x29>;
 	qcom,mdss-dsi-display-timings {
 		timing@0{
-			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
-				07 05 03 04 00];
 			qcom,display-topology = <1 0 1>,
-						<2 0 1>;
-			qcom,default-topology-index = <0>;
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <720 40 720 40 720 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
+		};
+		timing@1{
+			qcom,display-topology = <1 0 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <540 40 540 40 540 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
+		};
+		timing@2{
+			qcom,display-topology = <1 0 1>,
+						<2 2 1>;
+			qcom,default-topology-index = <1>;
+			qcom,panel-roi-alignment = <360 40 360 40 360 40>;
+			qcom,partial-update-enabled = "single_roi";
+			qcom,mdss-dsi-panel-phy-timings = [00 1a 06 06 22 20 07
+				07 04 03 04 00];
 		};
 	};
 };
@@ -709,8 +729,8 @@
 	qcom,mdss-dsi-t-clk-pre = <0x2d>;
 	qcom,mdss-dsi-display-timings {
 		 timing@0 {
-			qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07
-				05 03 04 00];
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
 			qcom,display-topology = <2 0 2>,
 						<1 0 2>;
 			qcom,default-topology-index = <0>;
@@ -724,8 +744,8 @@
 	qcom,ulps-enabled;
 	qcom,mdss-dsi-display-timings {
 		 timing@0 {
-			qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07
-				05 03 04 00];
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
 			qcom,display-topology = <2 0 2>,
 						<1 0 2>;
 			qcom,default-topology-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 00f0650..0c1f097 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -132,6 +132,7 @@
 	vdd-hba-supply = <&ufs_phy_gdsc>;
 	vdd-hba-fixed-regulator;
 	vcc-supply = <&pm8998_l20>;
+	vcc-voltage-level = <2950000 2960000>;
 	vccq2-supply = <&pm8998_s4>;
 	vcc-max-microamp = <600000>;
 	vccq2-max-microamp = <600000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 91bab54..1b83977 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -2800,6 +2800,7 @@
 		qcom,pas-id = <0xf>;
 		qcom,firmware-name = "ipa_fws";
 		qcom,pil-force-shutdown;
+		memory-region = <&pil_ipa_fw_mem>;
 	};
 
 	qcom,chd_sliver {
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 9335215..de90d43 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -470,6 +470,7 @@
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_TZ_SMMU=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index d2bb797..8145f47 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -487,6 +487,7 @@
 CONFIG_MSM_SMEM=y
 CONFIG_MSM_SMD=y
 CONFIG_MSM_SMD_DEBUG=y
+CONFIG_MSM_TZ_SMMU=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
index 5635346..c1ea8dd 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_pwr.c
@@ -111,13 +111,13 @@
 			regs->vregs[i].post_off_sleep = tmp;
 		}
 
-		++i;
 		pr_debug("[%s] minv=%d maxv=%d, en_load=%d, dis_load=%d\n",
 			 regs->vregs[i].vreg_name,
 			 regs->vregs[i].min_voltage,
 			 regs->vregs[i].max_voltage,
 			 regs->vregs[i].enable_load,
 			 regs->vregs[i].disable_load);
+		++i;
 	}
 
 error:
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
index 5a646e9..06f004c 100644
--- a/drivers/gpu/drm/msm/sde/sde_ad4.h
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -58,7 +58,7 @@
  * enum ad_intr_resp_property - ad4 interrupt response enum
  */
 enum ad_intr_resp_property {
-	AD4_BACKLIGHT,
+	AD4_IN_OUT_BACKLIGHT,
 	AD4_RESPMAX,
 };
 
@@ -92,8 +92,10 @@
  * sde_read_intr_resp_ad4 - api to get ad4 interrupt status for event
  * @dspp: pointer to dspp object
  * @event: event for which response is needed
- * @resp: value of event requested
+ * @resp_in: ad4 input value read for the requested event
+ * @resp_out: ad4 output value read for the requested event
  */
-void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp);
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
+			u32 *resp_in, u32 *resp_out);
 
 #endif /* _SDE_AD4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 07b5536..42aea7e 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -24,6 +24,7 @@
 #include "sde_ad4.h"
 #include "sde_hw_interrupts.h"
 #include "sde_core_irq.h"
+#include "dsi_panel.h"
 
 struct sde_cp_node {
 	u32 property_id;
@@ -1575,7 +1576,8 @@
 
 static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
 {
-	uint32_t bl = 0;
+	uint32_t input_bl = 0, output_bl = 0;
+	uint32_t scale = MAX_AD_BL_SCALE_LEVEL;
 	struct sde_hw_mixer *hw_lm = NULL;
 	struct sde_hw_dspp *hw_dspp = NULL;
 	u32 num_mixers;
@@ -1598,11 +1600,17 @@
 	if (!hw_dspp)
 		return;
 
-	hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl);
+	hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_IN_OUT_BACKLIGHT,
+			&input_bl, &output_bl);
+
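+	/* bail out on invalid readings: zero input or output greater than input */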
+	if (!input_bl || input_bl < output_bl)
+		return;
+
+	scale = (output_bl * MAX_AD_BL_SCALE_LEVEL) / input_bl;
 	event.length = sizeof(u32);
 	event.type = DRM_EVENT_AD_BACKLIGHT;
 	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
-			&event, (u8 *)&bl);
+			&event, (u8 *)&scale);
 }
 
 int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 43e6aaa..52e291d 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1404,7 +1404,8 @@
 }
 
 static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
-	struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
+		struct drm_crtc_state *old_state, struct sde_crtc *sde_crtc,
+		struct sde_crtc_mixer *mixer)
 {
 	struct drm_plane *plane;
 	struct drm_framebuffer *fb;
@@ -1424,7 +1425,7 @@
 	bool bg_alpha_enable = false;
 	u32 prefill = 0;
 
-	if (!sde_crtc || !mixer) {
+	if (!sde_crtc || !crtc->state || !mixer) {
 		SDE_ERROR("invalid sde_crtc or mixer\n");
 		return;
 	}
@@ -1435,7 +1436,9 @@
 	cstate = to_sde_crtc_state(crtc->state);
 
 	cstate->sbuf_prefill_line = 0;
-	sde_crtc->sbuf_flush_mask = 0x0;
+	sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask_all;
+	sde_crtc->sbuf_flush_mask_all = 0x0;
+	sde_crtc->sbuf_flush_mask_delta = 0x0;
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		state = plane->state;
@@ -1457,7 +1460,10 @@
 		sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_sbuf);
 
 		/* save sbuf flush value for later */
-		sde_crtc->sbuf_flush_mask |= flush_sbuf;
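+		/* planes carried in this commit contribute to the delta flush mask */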
+		if (old_state && drm_atomic_get_existing_plane_state(
+					old_state->state, plane))
+			sde_crtc->sbuf_flush_mask_delta |= flush_sbuf;
+		sde_crtc->sbuf_flush_mask_all |= flush_sbuf;
 
 		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
 				crtc->base.id,
@@ -1584,8 +1590,11 @@
 /**
  * _sde_crtc_blend_setup - configure crtc mixers
  * @crtc: Pointer to drm crtc structure
+ * @old_state: Pointer to old crtc state
+ * @add_planes: Whether or not to add planes to mixers
  */
-static void _sde_crtc_blend_setup(struct drm_crtc *crtc, bool add_planes)
+static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state, bool add_planes)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_crtc_state;
@@ -1632,7 +1641,7 @@
 	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
 
 	if (add_planes)
-		_sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
+		_sde_crtc_blend_setup_mixer(crtc, old_state, sde_crtc, mixer);
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
 		const struct sde_rect *lm_roi = &sde_crtc_state->lm_roi[i];
@@ -3198,7 +3207,7 @@
 	if (unlikely(!sde_crtc->num_mixers))
 		return;
 
-	_sde_crtc_blend_setup(crtc, true);
+	_sde_crtc_blend_setup(crtc, old_state, true);
 	_sde_crtc_dest_scaler_setup(crtc);
 
 	/* cancel the idle notify delayed work */
@@ -3422,23 +3431,29 @@
 	sde_crtc = to_sde_crtc(crtc);
 
 	/*
-	 * Update sbuf configuration and flush bits if a flush
-	 * mask has been defined for either the current or
-	 * previous commit.
+	 * Update sbuf configuration and flush bits if either the rot_op_mode
+	 * is different or a rotator commit was performed.
 	 *
-	 * Updates are also required for the first commit after
-	 * sbuf_flush_mask becomes 0x0, to properly transition
-	 * the hardware out of sbuf mode.
+	 * In the case where the rot_op_mode has changed, further require that
+	 * the transition is either to or from offline mode unless
+	 * sbuf_flush_mask_delta is also non-zero (i.e., a corresponding plane
+	 * update was provided to the current commit).
 	 */
-	if (!sde_crtc->sbuf_flush_mask_old && !sde_crtc->sbuf_flush_mask)
-		return 0;
+	flush_mask = sde_crtc->sbuf_flush_mask_delta;
+	if ((sde_crtc->sbuf_op_mode_old != cstate->sbuf_cfg.rot_op_mode) &&
+		(sde_crtc->sbuf_op_mode_old == SDE_CTL_ROT_OP_MODE_OFFLINE ||
+		cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE))
+		flush_mask |= sde_crtc->sbuf_flush_mask_all |
+			sde_crtc->sbuf_flush_mask_old;
 
-	flush_mask = sde_crtc->sbuf_flush_mask_old | sde_crtc->sbuf_flush_mask;
-	sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask;
+	if (!flush_mask &&
+		cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+		return 0;
 
 	SDE_ATRACE_BEGIN("crtc_kickoff_rot");
 
-	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE) {
+	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE &&
+			sde_crtc->sbuf_flush_mask_delta) {
 		drm_atomic_crtc_for_each_plane(plane, crtc) {
 			rc = sde_plane_kickoff_rot(plane);
 			if (rc) {
@@ -3474,12 +3489,16 @@
 		/* explicitly trigger rotator for async modes */
 		if (cstate->sbuf_cfg.rot_op_mode ==
 				SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
-				master_ctl->ops.trigger_rot_start) {
+				master_ctl->ops.trigger_rot_start)
 			master_ctl->ops.trigger_rot_start(master_ctl);
-			SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0);
-		}
+		SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0,
+				sde_crtc->sbuf_flush_mask_all,
+				sde_crtc->sbuf_flush_mask_delta);
 	}
 
+	/* save this in sde_crtc for next commit cycle */
+	sde_crtc->sbuf_op_mode_old = cstate->sbuf_cfg.rot_op_mode;
+
 	SDE_ATRACE_END("crtc_kickoff_rot");
 	return rc;
 }
@@ -3492,13 +3511,14 @@
 {
 	struct sde_crtc_mixer *mixer;
 	struct sde_hw_ctl *ctl;
-	u32 i, flush_mask;
+	u32 i, n, flush_mask;
 
 	if (!sde_crtc)
 		return;
 
 	mixer = sde_crtc->mixers;
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
+	n = min_t(size_t, sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+	for (i = 0; i < n; i++) {
 		ctl = mixer[i].hw_ctl;
 		if (!ctl || !ctl->ops.get_pending_flush ||
 				!ctl->ops.clear_pending_flush ||
@@ -3524,16 +3544,19 @@
 {
 	struct drm_plane *plane_halt[MAX_PLANES];
 	struct drm_plane *plane;
+	struct drm_encoder *encoder;
 	const struct drm_plane_state *pstate;
 	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
 	struct sde_hw_ctl *ctl;
 	enum sde_ctl_rot_op_mode old_rot_op_mode;
-	signed int i, plane_count;
+	signed int i, n, plane_count;
 	int rc;
 
-	if (!crtc || !old_state)
+	if (!crtc || !crtc->dev || !old_state || !crtc->state)
 		return -EINVAL;
 	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
 
 	old_rot_op_mode = to_sde_crtc_state(old_state)->sbuf_cfg.rot_op_mode;
 	SDE_EVT32(DRMID(crtc), old_rot_op_mode,
@@ -3545,7 +3568,8 @@
 	/* optionally generate a panic instead of performing a h/w reset */
 	SDE_DBG_CTRL("stop_ftrace", "reset_hw_panic");
 
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+	n = min_t(size_t, sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+	for (i = 0; i < n; ++i) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl || !ctl->ops.reset)
 			continue;
@@ -3570,14 +3594,13 @@
 	 * depending on the rotation mode; don't handle this for now
 	 * and just force a hard reset in those cases.
 	 */
-	if (i == sde_crtc->num_mixers &&
-			old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+	if (i == n && old_rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
 		return false;
 
 	SDE_DEBUG("crtc%d: issuing hard reset\n", DRMID(crtc));
 
 	/* force all components in the system into reset at the same time */
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+	for (i = 0; i < n; ++i) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl || !ctl->ops.hard_reset)
 			continue;
@@ -3613,11 +3636,26 @@
 		sde_plane_reset_rot(plane, (struct drm_plane_state *)pstate);
 	}
 
+	/* provide safe "border color only" commit configuration for later */
+	cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
+	_sde_crtc_commit_kickoff_rot(crtc, cstate);
+	_sde_crtc_remove_pipe_flush(sde_crtc);
+	_sde_crtc_blend_setup(crtc, old_state, false);
+
 	/* take h/w components out of reset */
 	for (i = plane_count - 1; i >= 0; --i)
 		sde_plane_halt_requests(plane_halt[i], false);
 
-	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+	/* attempt to poll for start of frame cycle before reset release */
+	list_for_each_entry(encoder,
+			&crtc->dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+			sde_encoder_poll_line_counts(encoder);
+	}
+
+	for (i = 0; i < n; ++i) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl || !ctl->ops.hard_reset)
 			continue;
@@ -3625,6 +3663,15 @@
 		ctl->ops.hard_reset(ctl, false);
 	}
 
+	list_for_each_entry(encoder,
+			&crtc->dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
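+		/* resume scanout on video-mode encoders now that reset is released */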
+		if (sde_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
+			sde_encoder_kickoff(encoder, false);
+	}
+
 	return -EAGAIN;
 }
 
@@ -3649,7 +3696,7 @@
 	cstate = to_sde_crtc_state(crtc->state);
 
 	/* default to ASYNC mode for inline rotation */
-	cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
+	cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask_all ?
 		SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
 
 	if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
@@ -3749,11 +3796,6 @@
 		if (_sde_crtc_reset_hw(crtc, old_state,
 					!sde_crtc->reset_request))
 			is_error = true;
-
-		/* force offline rotation mode since the commit has no pipes */
-		if (is_error)
-			cstate->sbuf_cfg.rot_op_mode =
-				SDE_CTL_ROT_OP_MODE_OFFLINE;
 	}
 	sde_crtc->reset_request = reset_req;
 
@@ -3799,7 +3841,7 @@
 
 	if (is_error) {
 		_sde_crtc_remove_pipe_flush(sde_crtc);
-		_sde_crtc_blend_setup(crtc, false);
+		_sde_crtc_blend_setup(crtc, old_state, false);
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 589a667..1de3675 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -215,8 +215,10 @@
  * @misr_enable   : boolean entry indicates misr enable/disable status.
  * @misr_frame_count  : misr frame count provided by client
  * @misr_data     : store misr data before turning off the clocks.
- * @sbuf_flush_mask: flush mask for inline rotator
+ * @sbuf_op_mode_old : inline rotator op mode for previous commit cycle
  * @sbuf_flush_mask_old: inline rotator flush mask for previous commit
+ * @sbuf_flush_mask_all: inline rotator flush mask for all attached planes
+ * @sbuf_flush_mask_delta: inline rotator flush mask for current delta state
  * @idle_notify_work: delayed worker to notify idle timeout to user space
  * @power_event   : registered power event handle
  * @cur_perf      : current performance committed to clock/bandwidth driver
@@ -284,8 +286,10 @@
 	u32 misr_frame_count;
 	u32 misr_data[CRTC_DUAL_MIXERS];
 
-	u32 sbuf_flush_mask;
+	u32 sbuf_op_mode_old;
 	u32 sbuf_flush_mask_old;
+	u32 sbuf_flush_mask_all;
+	u32 sbuf_flush_mask_delta;
 	struct kthread_delayed_work idle_notify_work;
 
 	struct sde_power_event *power_event;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 4008115..7162b06 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -3443,6 +3443,51 @@
 	_sde_encoder_power_enable(sde_enc, false);
 }
 
+int sde_encoder_poll_line_counts(struct drm_encoder *drm_enc)
+{
+	static const uint64_t timeout_us = 50000;
+	static const uint64_t sleep_us = 20;
+	struct sde_encoder_virt *sde_enc;
+	ktime_t cur_ktime, exp_ktime;
+	uint32_t line_count, tmp, i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->cur_master ||
+			!sde_enc->cur_master->ops.get_line_count) {
+		SDE_DEBUG_ENC(sde_enc, "can't get master line count\n");
+		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_ERROR);
+		return -EINVAL;
+	}
+
+	exp_ktime = ktime_add_ms(ktime_get(), timeout_us / 1000);
+
+	line_count = sde_enc->cur_master->ops.get_line_count(
+			sde_enc->cur_master);
+
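+	/*
+	 * Poll until the line count wraps (decreases), indicating the start
+	 * of a new frame cycle, or give up once the timeout expires.
+	 */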
+	for (i = 0; i < (timeout_us * 2 / sleep_us); ++i) {
+		tmp = line_count;
+		line_count = sde_enc->cur_master->ops.get_line_count(
+				sde_enc->cur_master);
+		if (line_count < tmp) {
+			SDE_EVT32(DRMID(drm_enc), line_count);
+			return 0;
+		}
+
+		cur_ktime = ktime_get();
+		if (ktime_compare_safe(exp_ktime, cur_ktime) <= 0)
+			break;
+
+		usleep_range(sleep_us / 2, sleep_us);
+	}
+
+	SDE_EVT32(DRMID(drm_enc), line_count, SDE_EVTLOG_ERROR);
+	return -ETIMEDOUT;
+}
+
 int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 937bd18..8038eb6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -108,6 +108,13 @@
 struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_poll_line_counts - poll encoder line counts for start of frame
+ * @encoder:	encoder pointer
+ * @Returns:	zero on success
+ */
+int sde_encoder_poll_line_counts(struct drm_encoder *encoder);
+
+/**
  * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
  *	path (i.e. ctl flush and start) at next appropriate time.
  *	Immediately: if no previous commit is outstanding.
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index bf48271..994bf3d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -1248,16 +1248,20 @@
 	return 0;
 }
 
-void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event, u32 *resp)
+void sde_read_intr_resp_ad4(struct sde_hw_dspp *dspp, u32 event,
+		u32 *resp_in, u32 *resp_out)
 {
-	if (!dspp || !resp) {
-		DRM_ERROR("invalid params dspp %pK resp %pK\n", dspp, resp);
+	if (!dspp || !resp_in || !resp_out) {
+		DRM_ERROR("invalid params dspp %pK resp_in %pK resp_out %pK\n",
+				dspp, resp_in, resp_out);
 		return;
 	}
 
 	switch (event) {
-	case AD4_BACKLIGHT:
-		*resp = SDE_REG_READ(&dspp->hw,
+	case AD4_IN_OUT_BACKLIGHT:
+		*resp_in = SDE_REG_READ(&dspp->hw,
+				dspp->cap->sblk->ad.base + 0x2c);
+		*resp_out = SDE_REG_READ(&dspp->hw,
 				dspp->cap->sblk->ad.base + 0x48);
 		break;
 	default:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 2b64165..2d2ac5b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -159,10 +159,11 @@
 	/**
 	 * ad_read_intr_resp - function to get interrupt response for ad
 	 * @event: Event for which response needs to be read
-	 * @resp: Pointer to u32 where response value is dumped.
+	 * @resp_in: Pointer to u32 where the ad4 input response is dumped.
+	 * @resp_out: Pointer to u32 where the ad4 output response is dumped.
 	 */
 	void (*ad_read_intr_resp)(struct sde_hw_dspp *ctx, u32 event,
-			u32 *resp);
+			u32 *resp_in, u32 *resp_out);
 
 };
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index fd031d7..fa5eb49 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -588,7 +588,7 @@
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	uint32_t sid_info;
-	struct scm_desc desc;
+	struct scm_desc desc = {0};
 	unsigned int resp = 0;
 	int ret = 0;
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index d53b1b5..4ffbd55 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -706,7 +706,9 @@
 				req->filter_index_list[i].filter_handle,
 				req->filter_index_list[i].filter_index);
 		return -EINVAL;
-	} else if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
+	}
+
+	if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
 		IPAWANERR(" UL filter rule for pipe %d install_status = %d\n",
 			req->source_pipe_index, req->install_status);
 		return -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index ee312c7..a812891 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2327,6 +2327,7 @@
 			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
 				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
 		}
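+		/* clear tx_pkt so a stale pointer is not reused on the next iteration */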
+		tx_pkt = NULL;
 	};
 
 	return rc;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 1c8715a..c158c94 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -610,7 +610,7 @@
 		IPAWANDBG("IPACM pass zero rules to Q6\n");
 	} else {
 		IPAWANDBG("IPACM pass %u rules to Q6\n",
-		req->filter_spec_ex_list_len);
+		req->filter_spec_list_len);
 	}
 
 	if (req->filter_spec_list_len >= QMI_IPA_MAX_FILTERS_V01) {
@@ -919,7 +919,9 @@
 		req->source_pipe_index,
 		req->rule_id_len);
 		return -EINVAL;
-	} else if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
+	}
+
+	if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) {
 		IPAWANERR(" UL filter rule for pipe %d install_status = %d\n",
 			req->source_pipe_index, req->install_status);
 		return -EINVAL;
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 457dc5f..41a1a79 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -671,13 +671,14 @@
 	if (sscanf(buf, "%s", str) != 1)
 		return -EINVAL;
 
+	mutex_lock(&drvdata->mutex);
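+	/* curr_list is validated and consumed only while holding the mutex */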
 	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
 		dev_err(dev,
 			"Select link list to program using curr_list\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
-	mutex_lock(&drvdata->mutex);
 	if (drvdata->enable[drvdata->curr_list]) {
 		ret = -EBUSY;
 		goto out;
@@ -771,10 +772,21 @@
 static ssize_t dcc_show_enable(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
+	int ret;
 	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
 
-	return scnprintf(buf, PAGE_SIZE, "%u\n",
+	mutex_lock(&drvdata->mutex);
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
 			 (unsigned int)drvdata->enable[drvdata->curr_list]);
+err:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
 }
 
 static ssize_t dcc_store_enable(struct device *dev,
@@ -812,10 +824,13 @@
 
 	buf[0] = '\0';
 
-	if (drvdata->curr_list >= DCC_MAX_LINK_LIST)
-		return -EINVAL;
-
 	mutex_lock(&drvdata->mutex);
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		count = -EINVAL;
+		goto err;
+	}
+
 	list_for_each_entry(entry,
 			    &drvdata->cfg_head[drvdata->curr_list], list) {
 		switch (entry->desc_type) {
@@ -852,8 +867,8 @@
 		count += len;
 	}
 
+err:
 	mutex_unlock(&drvdata->mutex);
-
 	return count;
 }
 
@@ -866,6 +881,12 @@
 
 	mutex_lock(&drvdata->mutex);
 
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(drvdata->dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
 	if (!len) {
 		dev_err(drvdata->dev, "DCC: Invalid length\n");
 		ret = -EINVAL;
@@ -959,11 +980,6 @@
 	if (nval <= 0 || nval > 3)
 		return -EINVAL;
 
-	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
-		dev_err(dev, "Select link list to program using curr_list\n");
-		return -EINVAL;
-	}
-
 	if (nval == 1) {
 		len = 1;
 		apb_bus = 0;
@@ -1028,6 +1044,12 @@
 	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
 
 	mutex_lock(&drvdata->mutex);
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
 	if (!drvdata->enable[drvdata->curr_list]) {
 		ret = -EINVAL;
 		goto err;
@@ -1049,6 +1071,13 @@
 	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
 
 	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
 	if (!drvdata->enable[drvdata->curr_list]) {
 		ret = -EINVAL;
 		goto err;
@@ -1159,6 +1188,12 @@
 		goto err;
 	}
 
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
 	if (list_empty(&drvdata->cfg_head[drvdata->curr_list])) {
 		dev_err(drvdata->dev, "DCC: No read address programmed\n");
 		ret = -EPERM;
@@ -1266,6 +1301,12 @@
 
 	mutex_lock(&drvdata->mutex);
 
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (drvdata->enable[drvdata->curr_list]) {
 		ret = -EBUSY;
 		goto out;
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 5289cd0..1c8bc51 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -25,12 +25,28 @@
 
 DEFINE_MUTEX(secure_buffer_mutex);
 
+struct cp2_mem_chunks {
+	u32 chunk_list;
+	u32 chunk_list_size;
+	u32 chunk_size;
+} __attribute__ ((__packed__));
+
+struct cp2_lock_req {
+	struct cp2_mem_chunks chunks;
+	u32 mem_usage;
+	u32 lock;
+} __attribute__ ((__packed__));
+
 struct mem_prot_info {
 	phys_addr_t addr;
 	u64 size;
 };
 
 #define MEM_PROT_ASSIGN_ID		0x16
+#define MEM_PROTECT_LOCK_ID2		0x0A
+#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
+#define V2_CHUNK_SIZE           SZ_1M
+#define FEATURE_ID_CP 12
 
 struct dest_vm_and_perm_info {
 	u32 vm;
@@ -42,6 +58,134 @@
 static void *qcom_secure_mem;
 #define QCOM_SECURE_MEM_SIZE (512*1024)
 
+static int secure_buffer_change_chunk(u32 chunks,
+				u32 nchunks,
+				u32 chunk_size,
+				int lock)
+{
+	struct cp2_lock_req request;
+	u32 resp;
+	int ret;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = request.chunks.chunk_list = chunks;
+	desc.args[1] = request.chunks.chunk_list_size = nchunks;
+	desc.args[2] = request.chunks.chunk_size = chunk_size;
+	/* Usage is now always 0 */
+	desc.args[3] = request.mem_usage = 0;
+	desc.args[4] = request.lock = lock;
+	desc.args[5] = 0;
+	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+				SCM_VAL);
+
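+	/*
+	 * Flush unused kmap/kmap_atomic mappings so no stale kernel
+	 * mappings of these pages remain once they are secured.
+	 */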
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
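+	/* legacy targets use the buffer-based SCM call; ARMv8 the flat form */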
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
+				&request, sizeof(request), &resp, sizeof(resp));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
+		resp = desc.ret[0];
+	}
+
+	return ret;
+}
+
+static int secure_buffer_change_table(struct sg_table *table, int lock)
+{
+	int i, j;
+	int ret = -EINVAL;
+	u32 *chunk_list;
+	struct scatterlist *sg;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int nchunks;
+		int size = sg->length;
+		int chunk_list_len;
+		phys_addr_t chunk_list_phys;
+
+		/*
+		 * This should theoretically be a phys_addr_t but the protocol
+		 * indicates this should be a u32.
+		 */
+		u32 base;
+		u64 tmp = sg_dma_address(sg);
+
+		WARN((tmp >> 32) & 0xffffffff,
+			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
+			__func__, sg, tmp);
+		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
+			WARN(1,
+				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
+				__func__, i, size, V2_CHUNK_SIZE);
+			return -EINVAL;
+		}
+
+		base = (u32)tmp;
+
+		nchunks = size / V2_CHUNK_SIZE;
+		chunk_list_len = sizeof(u32)*nchunks;
+
+		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
+
+		if (!chunk_list)
+			return -ENOMEM;
+
+		chunk_list_phys = virt_to_phys(chunk_list);
+		for (j = 0; j < nchunks; j++)
+			chunk_list[j] = base + j * V2_CHUNK_SIZE;
+
+		/*
+		 * Flush the chunk list before sending the memory to the
+		 * secure environment to ensure the data is actually present
+		 * in RAM
+		 */
+		dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
+
+		ret = secure_buffer_change_chunk(chunk_list_phys,
+				nchunks, V2_CHUNK_SIZE, lock);
+
+		if (!ret) {
+			/*
+			 * Set or clear the private page flag to communicate the
+			 * status of the chunk to other entities
+			 */
+			if (lock)
+				SetPagePrivate(sg_page(sg));
+			else
+				ClearPagePrivate(sg_page(sg));
+		}
+
+		kfree(chunk_list);
+	}
+
+	return ret;
+}
+
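+/* Lock ("secure") every chunk described by @table in the secure environment */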
+int msm_secure_table(struct sg_table *table)
+{
+	int ret;
+
+	mutex_lock(&secure_buffer_mutex);
+	ret = secure_buffer_change_table(table, 1);
+	mutex_unlock(&secure_buffer_mutex);
+
+	return ret;
+}
+
+int msm_unsecure_table(struct sg_table *table)
+{
+	int ret;
+
+	mutex_lock(&secure_buffer_mutex);
+	ret = secure_buffer_change_table(table, 0);
+	mutex_unlock(&secure_buffer_mutex);
+
+	return ret;
+}
+
 static struct dest_vm_and_perm_info *
 populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
 		   size_t *size_in_bytes)
@@ -279,6 +423,19 @@
 	}
 }
 
+#define MAKE_CP_VERSION(major, minor, patch) \
+	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
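+/* e.g. MAKE_CP_VERSION(1, 1, 0) == 0x00401000 */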
+
+bool msm_secure_v2_is_supported(void)
+{
+	/*
+	 * if the version is < 1.1.0 then dynamic buffer allocation is
+	 * not supported
+	 */
+	return (scm_get_feat_version(FEATURE_ID_CP) >=
+			MAKE_CP_VERSION(1, 1, 0));
+}
+
 static int __init alloc_secure_shared_memory(void)
 {
 	int ret = 0;
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 876e176..9ff0c73 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -840,6 +840,8 @@
 	ch->glink_state = GLINK_LOCAL_DISCONNECTED;
 	ch->txn_id = INITIAL_TXN_ID; /* use non-zero nonce for debug */
 	ch->pid = 0;
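+	/* drop any rx data left over from the closed connection */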
+	ch->actual_rx_size = 0;
+	ch->glink_rx_buf = NULL;
 
 	pr_debug("Channel closed [%s].\n", ch->name);
 
@@ -940,8 +942,8 @@
 
 	/* check for already pending data */
 	if (ch->actual_rx_size) {
-		pr_debug("already pending data size [%zu]\n",
-			 ch->actual_rx_size);
+		pr_debug("already pending data size [%zu] ch [%s]\n",
+			 ch->actual_rx_size, ch->name);
 		goto copy_buf;
 	}
 
@@ -949,24 +951,27 @@
 	reinit_completion(&ch->rx_done);
 
 	/* Wait for Rx response */
-	pr_debug("Wait for Rx done.\n");
+	pr_debug("Wait for Rx done, ch [%s].\n", ch->name);
 	if (timeout_msec)
 		timeleft = wait_for_completion_timeout(&ch->rx_done, jiffies);
 	else
 		wait_for_completion(&ch->rx_done);
 
 	if (timeleft == 0) {
-		pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
+		pr_err("rx_done timeout [%d] msec expired, ch [%s]\n",
+			timeout_msec, ch->name);
 		mutex_unlock(&ch->lock);
 		return -ETIMEDOUT;
 	} else if (ch->rx_abort) {
 		mutex_unlock(&ch->lock);
-		pr_err("rx_abort, probably remote side reset (SSR).\n");
+		pr_err("rx_abort, probably remote side reset (SSR), ch [%s].\n",
+			ch->name);
 		return -ERESTART; /* probably SSR */
 	} else if (ch->actual_rx_size) {
-		pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
+		pr_debug("actual_rx_size is [%zu], ch [%s]\n",
+			ch->actual_rx_size, ch->name);
 	} else {
-		pr_err("actual_rx_size is zero.\n");
+		pr_err("actual_rx_size is zero, ch [%s].\n", ch->name);
 		goto exit_err;
 	}
 
@@ -980,7 +985,7 @@
 	size = min_t(int, ch->actual_rx_size, size);
 	memcpy(buf, ch->glink_rx_buf, size);
 
-	pr_debug("copy size [%d].\n", (int) size);
+	pr_debug("copy size [%d] , ch [%s].\n", (int) size, ch->name);
 
 	/* free glink buffer after copy to spcom buffer */
 	glink_rx_done(ch->glink_handle, ch->glink_rx_buf, false);
@@ -993,7 +998,8 @@
 		pr_err("glink_queue_rx_intent() failed, ret [%d]", ret);
 		goto exit_err;
 	} else {
-		pr_debug("queue rx_buf, size [%zu]\n", ch->rx_buf_size);
+		pr_debug("queue rx_buf, size [%zu], ch [%s]\n",
+			ch->rx_buf_size, ch->name);
 	}
 
 	mutex_unlock(&ch->lock);
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 309b9cc..f3c8d51 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,6 +1,8 @@
 obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
-			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o \
-			ion_system_secure_heap.o
+			ion_carveout_heap.o ion_chunk_heap.o ion_system_secure_heap.o
+ifdef CONFIG_ION_MSM
+obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
+endif
 obj-$(CONFIG_ION_TEST) += ion_test.o
 ifdef CONFIG_COMPAT
 obj-$(CONFIG_ION) += compat_ion.o
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
new file mode 100644
index 0000000..b2eac28
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -0,0 +1,902 @@
+/*
+ * drivers/staging/android/ion/ion_cma_secure_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+#include <trace/events/kmem.h>
+
+#include <soc/qcom/secure_buffer.h>
+#include <asm/cacheflush.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED NULL
+
+struct ion_secure_cma_non_contig_info {
+	dma_addr_t phys;
+	int len;
+	struct list_head entry;
+};
+
+struct ion_secure_cma_buffer_info {
+	dma_addr_t phys;
+	struct sg_table *table;
+	bool is_cached;
+	int len;
+	struct list_head non_contig_list;
+	unsigned long ncelems;
+};
+
+struct ion_cma_alloc_chunk {
+	void *cpu_addr;
+	struct list_head entry;
+	dma_addr_t handle;
+	unsigned long chunk_size;
+	atomic_t cnt;
+};
+
+struct ion_cma_secure_heap {
+	struct device *dev;
+	/*
+	 * Protects against races between threads allocating memory/adding to
+	 * pool at the same time. (e.g. thread 1 adds to pool, thread 2
+	 * allocates thread 1's memory before thread 1 knows it needs to
+	 * allocate more.)
+	 * Admittedly this is fairly coarse grained right now, but the chance
+	 * of contention on this lock is low. This can be revisited if that
+	 * ever changes.
+	 */
+	struct mutex alloc_lock;
+	/*
+	 * protects the list of memory chunks in this pool
+	 */
+	struct mutex chunk_lock;
+	struct ion_heap heap;
+	/*
+	 * Bitmap for allocation. This contains the aggregate of all chunks.
+	 */
+	unsigned long *bitmap;
+	/*
+	 * List of all allocated chunks
+	 *
+	 * This is where things get 'clever'. Individual allocations from
+	 * dma_alloc_coherent must be allocated and freed in one chunk.
+	 * We don't just want to limit the allocations to those confined
+	 * within a single chunk (if clients allocate n small chunks we would
+	 * never be able to use the combined size). The bitmap allocator is
+	 * used to find the contiguous region and the parts of the chunks are
+	 * marked off as used. The chunks won't be freed in the shrinker until
+	 * the usage is actually zero.
+	 */
+	struct list_head chunks;
+	int npages;
+	ion_phys_addr_t base;
+	struct work_struct work;
+	unsigned long last_alloc;
+	struct shrinker shrinker;
+	atomic_t total_allocated;
+	atomic_t total_pool_size;
+	atomic_t total_leaked;
+	unsigned long heap_size;
+	unsigned long default_prefetch_size;
+};
+
+static void ion_secure_pool_pages(struct work_struct *work);
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+static int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				      dma_addr_t handle, size_t size)
+{
+	struct page *page = pfn_to_page(PFN_DOWN(handle));
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	sg_dma_address(sgt->sgl) = handle;
+	return 0;
+}
+
+static int ion_secure_cma_add_to_pool(
+					struct ion_cma_secure_heap *sheap,
+					unsigned long len,
+					bool prefetch)
+{
+	void *cpu_addr;
+	dma_addr_t handle;
+	unsigned long attrs = 0;
+	int ret = 0;
+	struct ion_cma_alloc_chunk *chunk;
+
+	trace_ion_secure_cma_add_to_pool_start(len, atomic_read(&sheap->
+					       total_pool_size), prefetch);
+	mutex_lock(&sheap->chunk_lock);
+
+	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+	if (!chunk) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	attrs = DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_SKIP_ZEROING;
+
+	cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL,
+				   attrs);
+
+	if (!cpu_addr) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	chunk->cpu_addr = cpu_addr;
+	chunk->handle = handle;
+	chunk->chunk_size = len;
+	atomic_set(&chunk->cnt, 0);
+	list_add(&chunk->entry, &sheap->chunks);
+	atomic_add(len, &sheap->total_pool_size);
+	/* clear the bitmap to indicate this region can be allocated from */
+	bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
+		     len >> PAGE_SHIFT);
+	goto out;
+
+out_free:
+	kfree(chunk);
+out:
+	mutex_unlock(&sheap->chunk_lock);
+
+	trace_ion_secure_cma_add_to_pool_end(len, atomic_read(&sheap->
+					     total_pool_size), prefetch);
+
+	return ret;
+}
+
+static void ion_secure_pool_pages(struct work_struct *work)
+{
+	struct ion_cma_secure_heap *sheap = container_of(work,
+			struct ion_cma_secure_heap, work);
+
+	ion_secure_cma_add_to_pool(sheap, sheap->last_alloc, true);
+}
+
+/*
+ * @s1: start of the first region
+ * @l1: length of the first region
+ * @s2: start of the second region
+ * @l2: length of the second region
+ *
+ * Returns the total number of bytes that intersect.
+ *
+ * s1 is the region we are trying to clear, so s2 may be subsumed by s1,
+ * but the maximum size to clear should only ever be l1.
+ *
+ */
+static unsigned int intersect(unsigned long s1, unsigned long l1,
+			      unsigned long s2, unsigned long l2)
+{
+	unsigned long base1 = s1;
+	unsigned long end1 = s1 + l1;
+	unsigned long base2 = s2;
+	unsigned long end2 = s2 + l2;
+
+	/* Case 0: The regions don't overlap at all */
+	if (!(base1 < end2 && base2 < end1))
+		return 0;
+
+	/* Case 1: region 2 is subsumed by region 1 */
+	if (base1 <= base2 && end2 <= end1)
+		return l2;
+
+	/* case 2: region 1 is subsumed by region 2 */
+	if (base2 <= base1 && end1 <= end2)
+		return l1;
+
+	/* case 3: region1 overlaps region2 on the bottom */
+	if (base2 < end1 && base2 > base1)
+		return end1 - base2;
+
+	/* case 4: region 2 overlaps region1 on the bottom */
+	if (base1 < end2 && base1 > base2)
+		return end2 - base1;
+
+	pr_err("Bad math! Did not detect chunks correctly! %lx %lx %lx %lx\n",
+	       s1, l1, s2, l2);
+	WARN_ON(1);
+	/* return the max intersection value, so that it will fail later */
+	return (unsigned int)(~0);
+}
+
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+	unsigned long len = (unsigned long)data;
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	unsigned long diff;
+
+	if ((int)heap->type != ION_HEAP_TYPE_SECURE_DMA)
+		return -EINVAL;
+
+	if (len == 0)
+		len = sheap->default_prefetch_size;
+
+	/*
+	 * Only prefetch as much space as there is left in the pool so
+	 * check against the current free size of the heap.
+	 * This is slightly racy if someone else is allocating at the same
+	 * time. CMA has a restricted size for the heap so worst case
+	 * the prefetch doesn't work because the allocation fails.
+	 */
+	diff = sheap->heap_size - atomic_read(&sheap->total_pool_size);
+
+	if (len > diff)
+		len = diff;
+
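+	/* record the size; the workqueue grows the pool asynchronously */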
+	sheap->last_alloc = len;
+	trace_ion_prefetching(sheap->last_alloc);
+	schedule_work(&sheap->work);
+
+	return 0;
+}
+
+static void bad_math_dump(unsigned long len, int total_overlap,
+			  struct ion_cma_secure_heap *sheap,
+			  bool alloc, dma_addr_t paddr)
+{
+	struct list_head *entry;
+
+	pr_err("Bad math! expected total was %lx actual was %x\n",
+	       len, total_overlap);
+	pr_err("attempted %s address was %pa len %lx\n",
+	       alloc ? "allocation" : "free", &paddr, len);
+	pr_err("chunks:\n");
+	list_for_each(entry, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk =
+			container_of(entry,
+				     struct ion_cma_alloc_chunk, entry);
+		pr_info("---   pa %pa len %lx\n",
+			&chunk->handle, chunk->chunk_size);
+	}
+	WARN(1, "mismatch in the sizes of secure cma chunks\n");
+}
+
+static int ion_secure_cma_alloc_from_pool(
+					struct ion_cma_secure_heap *sheap,
+					dma_addr_t *phys,
+					unsigned long len)
+{
+	dma_addr_t paddr;
+	unsigned long page_no;
+	int ret = 0;
+	int total_overlap = 0;
+	struct list_head *entry;
+
+	mutex_lock(&sheap->chunk_lock);
+
+	page_no = bitmap_find_next_zero_area(sheap->bitmap,
+					     sheap->npages, 0,
+					     len >> PAGE_SHIFT, 0);
+	if (page_no >= sheap->npages) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	bitmap_set(sheap->bitmap, page_no, len >> PAGE_SHIFT);
+	paddr = sheap->base + (page_no << PAGE_SHIFT);
+
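+	/*
+	 * Account the allocation against every pool chunk it overlaps; a
+	 * chunk is only released back (by the shrinker or a drain) once its
+	 * use count drops to zero.
+	 */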
+	list_for_each(entry, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+		int overlap = intersect(chunk->handle,
+					chunk->chunk_size, paddr, len);
+
+		atomic_add(overlap, &chunk->cnt);
+		total_overlap += overlap;
+	}
+
+	if (total_overlap != len) {
+		bad_math_dump(len, total_overlap, sheap, 1, paddr);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	*phys = paddr;
+out:
+	mutex_unlock(&sheap->chunk_lock);
+	return ret;
+}
+
+static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
+				      struct ion_cma_alloc_chunk *chunk)
+{
+	unsigned long attrs = 0;
+
+	attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+	/* This region is 'allocated' and not available to allocate from */
+	bitmap_set(sheap->bitmap, (chunk->handle - sheap->base) >> PAGE_SHIFT,
+		   chunk->chunk_size >> PAGE_SHIFT);
+	dma_free_attrs(sheap->dev, chunk->chunk_size, chunk->cpu_addr,
+		       chunk->handle, attrs);
+	atomic_sub(chunk->chunk_size, &sheap->total_pool_size);
+	list_del(&chunk->entry);
+	kfree(chunk);
+}
+
+static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
+					 int max_nr)
+{
+	struct list_head *entry, *_n;
+	unsigned long drained_size = 0, skipped_size = 0;
+
+	trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size);
+
+	list_for_each_safe(entry, _n, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+
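+		/* stop draining once the requested amount has been released */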
+		if (max_nr < 0)
+			break;
+
+		if (atomic_read(&chunk->cnt) == 0) {
+			max_nr -= chunk->chunk_size;
+			drained_size += chunk->chunk_size;
+			ion_secure_cma_free_chunk(sheap, chunk);
+		} else {
+			skipped_size += chunk->chunk_size;
+		}
+	}
+
+	trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+}
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	mutex_lock(&sheap->chunk_lock);
+	__ion_secure_cma_shrink_pool(sheap, INT_MAX);
+	mutex_unlock(&sheap->chunk_lock);
+
+	return 0;
+}
+
+static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
+					     struct shrink_control *sc)
+{
+	struct ion_cma_secure_heap *sheap = container_of(shrinker,
+					struct ion_cma_secure_heap, shrinker);
+	int nr_to_scan = sc->nr_to_scan;
+
+	/*
+	 * Allocation path may invoke the shrinker. Proceeding any further
+	 * would cause a deadlock in several places so don't shrink if that
+	 * happens.
+	 */
+	if (!mutex_trylock(&sheap->chunk_lock))
+		return -EAGAIN;
+
+	__ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+
+	mutex_unlock(&sheap->chunk_lock);
+
+	return atomic_read(&sheap->total_pool_size);
+}
+
+static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
+						   struct shrink_control *sc)
+{
+	struct ion_cma_secure_heap *sheap = container_of(shrinker,
+					struct ion_cma_secure_heap, shrinker);
+	return atomic_read(&sheap->total_pool_size);
+}
+
+static void ion_secure_cma_free_from_pool(struct ion_cma_secure_heap *sheap,
+					  dma_addr_t handle,
+					  unsigned long len)
+{
+	struct list_head *entry, *_n;
+	int total_overlap = 0;
+
+	mutex_lock(&sheap->chunk_lock);
+	bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT,
+		     len >> PAGE_SHIFT);
+
+	list_for_each_safe(entry, _n, &sheap->chunks) {
+		struct ion_cma_alloc_chunk *chunk = container_of(entry,
+					struct ion_cma_alloc_chunk, entry);
+		int overlap = intersect(chunk->handle,
+					chunk->chunk_size, handle, len);
+
+		/*
+		 * Don't actually free this from the pool list yet, let either
+		 * an explicit drain call or the shrinkers take care of the
+		 * pool.
+		 */
+		atomic_sub_return(overlap, &chunk->cnt);
+		if (atomic_read(&chunk->cnt) < 0) {
+			WARN(1, "Invalid chunk size of %d\n",
+			     atomic_read(&chunk->cnt));
+			goto out;
+		}
+
+		total_overlap += overlap;
+	}
+
+	if (atomic_read(&sheap->total_pool_size) < 0) {
+		WARN(1, "total pool size of %d is unexpected\n",
+		     atomic_read(&sheap->total_pool_size));
+		goto out;
+	}
+
+	if (total_overlap != len)
+		bad_math_dump(len, total_overlap, sheap, 0, handle);
+out:
+	mutex_unlock(&sheap->chunk_lock);
+}
+
+/* ION CMA heap operations functions */
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
+			    struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info;
+	int ret;
+
+	dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ION_CMA_ALLOCATE_FAILED;
+
+	mutex_lock(&sheap->alloc_lock);
+	ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
+
+	if (ret) {
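+		/* nothing suitable in the pool: grow it by len and retry */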
+retry:
+		ret = ion_secure_cma_add_to_pool(sheap, len, false);
+		if (ret) {
+			mutex_unlock(&sheap->alloc_lock);
+			dev_err(sheap->dev, "Fail to allocate buffer\n");
+			goto err;
+		}
+		ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len);
+		if (ret) {
+			/*
+			 * Lost the race with the shrinker, try again
+			 */
+			goto retry;
+		}
+	}
+	mutex_unlock(&sheap->alloc_lock);
+
+	atomic_add(len, &sheap->total_allocated);
+	info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(sheap->dev, "Fail to allocate sg table\n");
+		goto err;
+	}
+
+	info->len = len;
+	ion_secure_cma_get_sgtable(sheap->dev,
+				   info->table, info->phys, len);
+
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
+	return info;
+
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void __ion_secure_cma_free_non_contig(struct ion_cma_secure_heap *sheap,
+					     struct ion_secure_cma_buffer_info
+					     *info)
+{
+	struct ion_secure_cma_non_contig_info *nc_info, *temp;
+
+	list_for_each_entry_safe(nc_info, temp, &info->non_contig_list, entry) {
+		ion_secure_cma_free_from_pool(sheap, nc_info->phys,
+					      nc_info->len);
+		list_del(&nc_info->entry);
+		kfree(nc_info);
+	}
+}
+
+static void __ion_secure_cma_free(struct ion_cma_secure_heap *sheap,
+				  struct ion_secure_cma_buffer_info *info,
+				  bool release_memory)
+{
+	if (release_memory) {
+		if (info->ncelems)
+			__ion_secure_cma_free_non_contig(sheap, info);
+		else
+			ion_secure_cma_free_from_pool(sheap, info->phys,
+						      info->len);
+	}
+	sg_free_table(info->table);
+	kfree(info->table);
+	kfree(info);
+}
+
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate_non_contig(
+			struct ion_heap *heap, struct ion_buffer *buffer,
+			unsigned long len, unsigned long align,
+			unsigned long flags)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info;
+	int ret;
+	unsigned long alloc_size = len;
+	struct ion_secure_cma_non_contig_info *nc_info, *temp;
+	unsigned long ncelems = 0;
+	struct scatterlist *sg;
+	unsigned long total_allocated = 0;
+
+	dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return ION_CMA_ALLOCATE_FAILED;
+
+	INIT_LIST_HEAD(&info->non_contig_list);
+	info->table = kmalloc(sizeof(*info->table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(sheap->dev, "Fail to allocate sg table\n");
+		goto err;
+	}
+	mutex_lock(&sheap->alloc_lock);
+	while (total_allocated < len) {
+		if (alloc_size < SZ_1M) {
+			pr_err("Cannot allocate less than 1MB\n");
+			goto err2;
+		}
+		nc_info = kzalloc(sizeof(*nc_info), GFP_KERNEL);
+		if (!nc_info)
+			goto err2;
+
+		ret = ion_secure_cma_alloc_from_pool(sheap, &nc_info->phys,
+						     alloc_size);
+		if (ret) {
+retry:
+			ret = ion_secure_cma_add_to_pool(sheap, alloc_size,
+							 false);
+			if (ret) {
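+				/*
+				 * Pool could not grow by alloc_size: halve
+				 * the request (kept 1MB-aligned) and try
+				 * a smaller piece.
+				 */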
+				alloc_size = alloc_size / 2;
+				if (!IS_ALIGNED(alloc_size, SZ_1M))
+					alloc_size = round_down(alloc_size,
+								SZ_1M);
+				kfree(nc_info);
+				continue;
+			}
+			ret = ion_secure_cma_alloc_from_pool(sheap,
+							     &nc_info->phys,
+							     alloc_size);
+			if (ret) {
+				/*
+				 * Lost the race with the shrinker, try again
+				 */
+				goto retry;
+			}
+		}
+		nc_info->len = alloc_size;
+		list_add_tail(&nc_info->entry, &info->non_contig_list);
+		ncelems++;
+		total_allocated += alloc_size;
+		alloc_size = min(alloc_size, len - total_allocated);
+	}
+	mutex_unlock(&sheap->alloc_lock);
+	atomic_add(total_allocated, &sheap->total_allocated);
+
+	nc_info = list_first_entry_or_null(&info->non_contig_list,
+					   struct
+					   ion_secure_cma_non_contig_info,
+					   entry);
+	if (!nc_info) {
+		pr_err("%s: Unable to find first entry of non contig list\n",
+		       __func__);
+		goto err1;
+	}
+	info->phys = nc_info->phys;
+	info->len = total_allocated;
+	info->ncelems = ncelems;
+
+	ret = sg_alloc_table(info->table, ncelems, GFP_KERNEL);
+	if (unlikely(ret))
+		goto err1;
+
+	sg = info->table->sgl;
+	list_for_each_entry(nc_info, &info->non_contig_list, entry) {
+		sg_set_page(sg, phys_to_page(nc_info->phys), nc_info->len, 0);
+		sg_dma_address(sg) = nc_info->phys;
+		sg = sg_next(sg);
+	}
+	buffer->priv_virt = info;
+	dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer);
+	return info;
+
+err2:
+	mutex_unlock(&sheap->alloc_lock);
+err1:
+	list_for_each_entry_safe(nc_info, temp, &info->non_contig_list,
+				 entry) {
+		list_del(&nc_info->entry);
+		kfree(nc_info);
+	}
+	kfree(info->table);
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static int ion_secure_cma_allocate(struct ion_heap *heap,
+				   struct ion_buffer *buffer,
+				   unsigned long len, unsigned long align,
+				   unsigned long flags)
+{
+	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
+	struct ion_secure_cma_buffer_info *buf = NULL;
+	unsigned long allow_non_contig = flags & ION_FLAG_ALLOW_NON_CONTIG;
+
+	if (!secure_allocation &&
+	    !ion_heap_allow_secure_allocation(heap->type)) {
+		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
+		       __func__, heap->name, flags);
+		return -ENOMEM;
+	}
+
+	if (ION_IS_CACHED(flags)) {
+		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
+		       __func__, heap->name);
+		return -ENOMEM;
+	}
+
+	if (!IS_ALIGNED(len, SZ_1M)) {
+		pr_err("%s: length of allocation from %s must be a multiple of 1MB\n",
+		       __func__, heap->name);
+		return -ENOMEM;
+	}
+	trace_ion_secure_cma_allocate_start(heap->name, len, align, flags);
+	if (!allow_non_contig)
+		buf = __ion_secure_cma_allocate(heap, buffer, len, align,
+						flags);
+	else
+		buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len,
+							   align, flags);
+	trace_ion_secure_cma_allocate_end(heap->name, len, align, flags);
+	if (buf) {
+		int ret;
+
+		if (!msm_secure_v2_is_supported()) {
+			pr_err("%s: securing buffers from clients is not supported on this platform\n",
+			       __func__);
+			ret = 1;
+		} else {
+			trace_ion_cp_secure_buffer_start(heap->name, len, align,
+							 flags);
+			ret = msm_secure_table(buf->table);
+			trace_ion_cp_secure_buffer_end(heap->name, len, align,
+						       flags);
+		}
+		if (ret) {
+			struct ion_cma_secure_heap *sheap =
+				container_of(buffer->heap,
+					     struct ion_cma_secure_heap, heap);
+
+			pr_err("%s: failed to secure buffer\n", __func__);
+			__ion_secure_cma_free(sheap, buf, true);
+		}
+		return ret;
+	} else {
+		return -ENOMEM;
+	}
+}
+
+static void ion_secure_cma_free(struct ion_buffer *buffer)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(buffer->heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+	int ret = 0;
+
+	dev_dbg(sheap->dev, "Release buffer %pK\n", buffer);
+	if (msm_secure_v2_is_supported())
+		ret = msm_unsecure_table(info->table);
+	atomic_sub(buffer->size, &sheap->total_allocated);
+	if (atomic_read(&sheap->total_allocated) < 0) {
+		WARN(1, "no memory is allocated from this pool\n");
+		return;
+	}
+
+	/* release memory */
+	if (ret) {
+		WARN(1, "Unsecure failed, can't free the memory. Leaking it!");
+		atomic_add(buffer->size, &sheap->total_leaked);
+	}
+
+	__ion_secure_cma_free(sheap, info, ret ? false : true);
+}
+
+static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			       ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(sheap->dev, "Return buffer %pK physical address 0x%pa\n",
+		buffer, &info->phys);
+
+	*addr = info->phys;
+	*len = buffer->size;
+
+	return 0;
+}
+
+static struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
+						    struct ion_buffer *buffer)
+{
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+static void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
+					  struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_cma_mmap(struct ion_heap *mapper,
+			       struct ion_buffer *buffer,
+			       struct vm_area_struct *vma)
+{
+	pr_info("%s: mmapping from secure heap %s disallowed\n",
+		__func__, mapper->name);
+	return -EINVAL;
+}
+
+static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
+				       struct ion_buffer *buffer)
+{
+	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
+		__func__, heap->name);
+	return ERR_PTR(-EINVAL);
+}
+
+static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+				      const struct list_head *mem_map)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	if (mem_map) {
+		struct mem_map_data *data;
+
+		seq_puts(s, "\nMemory Map\n");
+		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+			   "client", "start address", "end address",
+			   "size");
+
+		list_for_each_entry(data, mem_map, node) {
+			const char *client_name = "(null)";
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			seq_printf(s, "%16.s 0x%14pa 0x%14pa %14lu (0x%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
+				   data->size, data->size);
+		}
+	}
+	seq_printf(s, "Total allocated: 0x%x\n",
+		   atomic_read(&sheap->total_allocated));
+	seq_printf(s, "Total pool size: 0x%x\n",
+		   atomic_read(&sheap->total_pool_size));
+	seq_printf(s, "Total memory leaked due to unlock failures: 0x%x\n",
+		   atomic_read(&sheap->total_leaked));
+
+	return 0;
+}
+
+static struct ion_heap_ops ion_secure_cma_ops = {
+	.allocate = ion_secure_cma_allocate,
+	.free = ion_secure_cma_free,
+	.map_dma = ion_secure_cma_heap_map_dma,
+	.unmap_dma = ion_secure_cma_heap_unmap_dma,
+	.phys = ion_secure_cma_phys,
+	.map_user = ion_secure_cma_mmap,
+	.map_kernel = ion_secure_cma_map_kernel,
+	.unmap_kernel = ion_secure_cma_unmap_kernel,
+	.print_debug = ion_secure_cma_print_debug,
+};
+
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_cma_secure_heap *sheap;
+	int map_size = BITS_TO_LONGS(data->size >> PAGE_SHIFT) * sizeof(long);
+
+	sheap = kzalloc(sizeof(*sheap), GFP_KERNEL);
+	if (!sheap)
+		return ERR_PTR(-ENOMEM);
+
+	sheap->dev = data->priv;
+	mutex_init(&sheap->chunk_lock);
+	mutex_init(&sheap->alloc_lock);
+	sheap->heap.ops = &ion_secure_cma_ops;
+	sheap->heap.type = ION_HEAP_TYPE_SECURE_DMA;
+	sheap->npages = data->size >> PAGE_SHIFT;
+	sheap->base = data->base;
+	sheap->heap_size = data->size;
+	sheap->bitmap = kmalloc(map_size, GFP_KERNEL);
+	INIT_LIST_HEAD(&sheap->chunks);
+	INIT_WORK(&sheap->work, ion_secure_pool_pages);
+	sheap->shrinker.seeks = DEFAULT_SEEKS;
+	sheap->shrinker.batch = 0;
+	sheap->shrinker.scan_objects = ion_secure_cma_shrinker;
+	sheap->shrinker.count_objects = ion_secure_cma_shrinker_count;
+	sheap->default_prefetch_size = sheap->heap_size;
+	register_shrinker(&sheap->shrinker);
+
+	if (!sheap->bitmap) {
+		kfree(sheap);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (data->extra_data) {
+		struct ion_cma_pdata *extra = data->extra_data;
+
+		sheap->default_prefetch_size = extra->default_prefetch_size;
+	}
+
+	/*
+	 * Initially mark the whole heap range as unavailable; regions only
+	 * become allocatable once backing chunks are added to the pool,
+	 * which clears their bits.
+	 */
+	bitmap_fill(sheap->bitmap, sheap->npages);
+
+	return &sheap->heap;
+}
+
+void ion_secure_cma_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_cma_secure_heap *sheap =
+		container_of(heap, struct ion_cma_secure_heap, heap);
+
+	kfree(sheap);
+}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index bb119cc..775c666 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -26,6 +26,7 @@
 #include <linux/rbtree.h>
 #include <linux/seq_file.h>
 
+#include "msm_ion_priv.h"
 #include <linux/sched.h>
 #include <linux/shrinker.h>
 #include <linux/types.h>
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 9d53391..3771726 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -404,6 +404,28 @@
 			ret = -ENOMEM;
 		break;
 	}
+	case ION_HEAP_TYPE_SECURE_DMA:
+	{
+		unsigned int val;
+		struct ion_cma_pdata *extra = NULL;
+
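+		/* "qcom,default-prefetch-size" is optional; absence is not an error */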
+		ret = of_property_read_u32(node,
+					   "qcom,default-prefetch-size", &val);
+		if (!ret) {
+			heap->extra_data = kzalloc(sizeof(*extra),
+						   GFP_KERNEL);
+
+			if (!heap->extra_data) {
+				ret = -ENOMEM;
+			} else {
+				extra = heap->extra_data;
+				extra->default_prefetch_size = val;
+			}
+		} else {
+			ret = 0;
+		}
+		break;
+	}
 	default:
 		heap->extra_data = 0;
 		break;
@@ -423,6 +445,7 @@
 	MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
 	MAKE_HEAP_TYPE_MAPPING(CHUNK),
 	MAKE_HEAP_TYPE_MAPPING(DMA),
+	MAKE_HEAP_TYPE_MAPPING(SECURE_DMA),
 	MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
 	MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
 };
@@ -609,6 +632,16 @@
 	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
 }
 
+int ion_heap_allow_secure_allocation(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA);
+}
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SECURE_DMA);
+}
+
 int ion_heap_allow_heap_secure(enum ion_heap_type type)
 {
 	return false;
@@ -796,6 +829,13 @@
 		int ret;
 
 		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+			ION_HEAP_TYPE_SECURE_DMA,
+			(void *)data.prefetch_data.len,
+			ion_secure_cma_prefetch);
+		if (ret)
+			return ret;
+
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
 				     ION_HEAP_TYPE_SYSTEM_SECURE,
 				     (void *)&data.prefetch_data,
 				     ion_system_secure_heap_prefetch);
@@ -806,6 +846,13 @@
 	case ION_IOC_DRAIN:
 	{
 		int ret;
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+				     ION_HEAP_TYPE_SECURE_DMA,
+				     (void *)data.prefetch_data.len,
+				     ion_secure_cma_drain_pool);
+
+		if (ret)
+			return ret;
 
 		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
 				     ION_HEAP_TYPE_SYSTEM_SECURE,
@@ -959,6 +1006,11 @@
 	struct ion_heap *heap = NULL;
 
 	switch ((int)heap_data->type) {
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_SECURE_DMA:
+		heap = ion_secure_cma_heap_create(heap_data);
+		break;
+#endif
 	case ION_HEAP_TYPE_SYSTEM_SECURE:
 		heap = ion_system_secure_heap_create(heap_data);
 		break;
@@ -988,6 +1040,11 @@
 		return;
 
 	switch ((int)heap->type) {
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_SECURE_DMA:
+		ion_secure_cma_heap_destroy(heap);
+		break;
+#endif
 	case ION_HEAP_TYPE_SYSTEM_SECURE:
 		ion_system_secure_heap_destroy(heap);
 		break;
diff --git a/drivers/staging/android/ion/msm_ion_priv.h b/drivers/staging/android/ion/msm_ion_priv.h
new file mode 100644
index 0000000..bbf2e8b
--- /dev/null
+++ b/drivers/staging/android/ion/msm_ion_priv.h
@@ -0,0 +1,112 @@
+/*
+ * drivers/staging/android/ion/msm_ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_ION_PRIV_H
+#define _MSM_ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap);
+void ion_iommu_heap_destroy(struct ion_heap *heap);
+
+struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap);
+void ion_cp_heap_destroy(struct ion_heap *heap);
+
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
+void ion_system_secure_heap_destroy(struct ion_heap *heap);
+int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data);
+int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);
+
+struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
+void ion_cma_secure_heap_destroy(struct ion_heap *heap);
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+			  unsigned int cmd,
+			  unsigned long arg);
+
+#ifdef CONFIG_CMA
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *heap);
+void ion_secure_cma_heap_destroy(struct ion_heap *heap);
+
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data);
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused);
+
+#else
+static inline int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+	return -ENODEV;
+}
+
+static inline int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+	return -ENODEV;
+}
+
+#endif
+
+struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *pheap);
+void ion_removed_heap_destroy(struct ion_heap *heap);
+
+#define ION_CP_ALLOCATE_FAIL -1
+#define ION_RESERVED_ALLOCATE_FAIL -1
+
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+			  unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type);
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type);
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type);
+
+int get_secure_vmid(unsigned long flags);
+
+bool is_secure_vmid_valid(int vmid);
+
+/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+
+/**
+ * ion_create_chunked_sg_table - helper function to create sg table
+ * with specified chunk size
+ * @buffer_base:	The starting address used for the sg dma address
+ * @chunk_size:		The size of each entry in the sg table
+ * @total_size:		The total size of the sg table (i.e. the sum of the
+ *			entries). This will be rounded up to the nearest
+ *			multiple of `chunk_size'
+ */
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+					     size_t chunk_size,
+					     size_t total_size);
+
+void show_ion_usage(struct ion_device *dev);
+#endif /* _MSM_ION_PRIV_H */
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index 4f9dd73..d510fda 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -92,6 +92,11 @@
 #define ION_FLAG_CP_CAMERA_PREVIEW	ION_BIT(27)
 #define ION_FLAG_CP_SPSS_HLOS_SHARED	ION_BIT(30)
 
+/**
+ * Flag to allow non-contiguous allocation of memory from the secure
+ * heap.
+ */
+#define ION_FLAG_ALLOW_NON_CONTIG       ION_BIT(28)
 
 /**
  * Flag to use when allocating to indicate that a heap is secure.
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 5b5d4c7..34ed577 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -63,6 +63,15 @@
 	return tsk->signal->oom_mm;
 }
 
+/*
+ * Use this helper if tsk->mm != mm and the victim mm needs special
+ * handling. This is guaranteed to remain true once set.
+ */
+static inline bool mm_is_oom_victim(struct mm_struct *mm)
+{
+	return test_bit(MMF_OOM_VICTIM, &mm->flags);
+}
+
 extern unsigned long oom_badness(struct task_struct *p,
 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
 		unsigned long totalpages);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0d4035a..62c770d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -636,6 +636,7 @@
 #define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
 #define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
 #define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */
+#define MMF_OOM_VICTIM		25      /* mm is the oom victim */
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 12fa374..d9a526d 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -50,6 +50,8 @@
 #define PERM_EXEC			0x1
 
 #ifdef CONFIG_QCOM_SECURE_BUFFER
+int msm_secure_table(struct sg_table *table);
+int msm_unsecure_table(struct sg_table *table);
 int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -57,8 +59,19 @@
 extern int hyp_assign_phys(phys_addr_t addr, u64 size,
 			u32 *source_vmlist, int source_nelems,
 			int *dest_vmids, int *dest_perms, int dest_nelems);
+bool msm_secure_v2_is_supported(void);
 const char *msm_secure_vmid_to_string(int secure_vmid);
 #else
+static inline int msm_secure_table(struct sg_table *table)
+{
+	return -EINVAL;
+}
+
+static inline int msm_unsecure_table(struct sg_table *table)
+{
+	return -EINVAL;
+}
+
 static inline int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -74,6 +87,11 @@
 	return -EINVAL;
 }
 
+static inline bool msm_secure_v2_is_supported(void)
+{
+	return false;
+}
+
 static inline const char *msm_secure_vmid_to_string(int secure_vmid)
 {
 	return "N/A";
diff --git a/mm/mmap.c b/mm/mmap.c
index 7e6c049..c30a61e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2984,20 +2984,20 @@
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
-	set_bit(MMF_OOM_SKIP, &mm->flags);
-	if (unlikely(tsk_is_oom_victim(current))) {
+	if (unlikely(mm_is_oom_victim(mm))) {
 		/*
 		 * Wait for oom_reap_task() to stop working on this
 		 * mm. Because MMF_OOM_SKIP is already set before
 		 * calling down_read(), oom_reap_task() will not run
 		 * on this "mm" post up_write().
 		 *
-		 * tsk_is_oom_victim() cannot be set from under us
-		 * either because current->mm is already set to NULL
+		 * mm_is_oom_victim() cannot be set from under us
+		 * either because victim->mm is already set to NULL
 		 * under task_lock before calling mmput and oom_mm is
-		 * set not NULL by the OOM killer only if current->mm
+		 * set not NULL by the OOM killer only if victim->mm
 		 * is found not NULL while holding the task_lock.
 		 */
+		set_bit(MMF_OOM_SKIP, &mm->flags);
 		down_write(&mm->mmap_sem);
 		up_write(&mm->mmap_sem);
 	}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index af9a8a6..6fd9773 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -677,8 +677,10 @@
 		return;
 
 	/* oom_mm is bound to the signal struct life time. */
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
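+		/* first time this mm is selected: pin it and flag it as a victim */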
 		atomic_inc(&tsk->signal->oom_mm->mm_count);
+		set_bit(MMF_OOM_VICTIM, &mm->flags);
+	}
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep