Merge "ARM: dts: msm: add device bindings for RPM stats" into msm-4.9
diff --git a/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt b/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt
new file mode 100644
index 0000000..231b8a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,aop-qmp.txt
@@ -0,0 +1,17 @@
+Qualcomm Technologies, Inc. Always On Processor Clock Controller Binding
+------------------------------------------------------------------------
+
+Required properties:
+- compatible : must be "qcom,aop-qmp-clk"
+- #clock-cells : must contain 1
+- mboxes : list of QMP mailbox phandle and channel identifier tuples.
+- mbox-names : list of identifier strings for each mailbox channel.
+		Must contain "qdss_clk".
+
+Example:
+	clock_qdss: qcom,aopclk {
+		compatible = "qcom,aop-qmp-clk";
+		#clock-cells = <1>;
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "qdss_clk";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index 6d72e8b..bdc0eba 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -10,6 +10,10 @@
   of macroblocks per second. The load is a reflection of hardware capability
   rather than a performance guarantee. Performance is guaranteed only up to
   advertised capability of the chipset.
+- qcom,max-hq-mbs-per-frame : Maximum number of macroblocks (mbs) per frame
+    beyond which "High Quality" encoding is not supported.
+- qcom,max-hq-frames-per-sec : Maximum number of frames per second beyond
+    which "High Quality" encoding is not supported.
 
 Optional properties:
 - reg : offset and length of the register set for the device.
@@ -157,7 +161,6 @@
 
 Example:
 
-
 	qcom,vidc@fdc00000 {
 		compatible = "qcom,msm-vidc";
 		reg = <0xfdc00000 0xff000>;
@@ -182,6 +185,8 @@
 		qcom,use_dynamic_bw_update;
 		qcom,fw-bias = <0xe000000>;
 		qcom,allowed-clock-rates = <200000000 300000000 400000000>;
+		qcom,max-hq-mbs-per-frame = <8160>;
+		qcom,max-hq-frames-per-sec = <60>;
 		msm_vidc_cb1: msm_vidc_cb1 {
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_ns";
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
index e53b691..04b624b 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-geni-uart.txt
@@ -16,6 +16,10 @@
   Should be "active" and "sleep" for the pin confuguration when core is active
   or when entering sleep state.
 
+Optional properties:
+- qcom,bus-mas: contains the bus master ID used to vote for bus bandwidth
+		on the interconnect buses.
+
 Example:
 qupv3_uart11: qcom,qup_uart@0xa88000 {
 	compatible = "qcom,msm-geni-uart";
@@ -29,4 +33,5 @@
 	pinctrl-0 = <&qup_1_uart_3_active>;
 	pinctrl-1 = <&qup_1_uart_3_sleep>;
 	interrupts = <0 355 0>;
+	qcom,bus-mas = <MASTER_BLSP_2>;
 };
diff --git a/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt b/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
index 080d4da..8bead0d 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
+++ b/Documentation/devicetree/bindings/thermal/qcom-lmh-dcvs.txt
@@ -25,12 +25,18 @@
 			interrupt generated by the LMH DCVSh hardware. LMH
 			DCVSh hardware will generate this interrupt whenever
 			it makes a new cpu DCVS decision.
+- qcom,affinity:
+	Usage: Required
+	Value type: <u32>
+	Definition: Should specify the CPU cluster affinity that this
+			LMH DCVSh hardware instance corresponds to.
 
 Example:
 
 	lmh_dcvs0: qcom,limits-dcvs@0 {
 		compatible = "qcom,msm-hw-limits";
 		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,affinity = <0>;
 	};
 
 	CPU0: cpu@0 {
diff --git a/Makefile b/Makefile
index c2660d2..7040118 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 23
+SUBLEVEL = 24
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 3766d51..f1501fa 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -33,16 +33,12 @@
 				<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
-		clock-names = "gcc_gpu_cfg_ahb_clk",
-				"gcc_ddrss_gpu_axi_clk",
+		clock-names = "gcc_ddrss_gpu_axi_clk",
 				"gcc_gpu_memnoc_gfx_clk",
-				"gcc_gpu_snoc_dvm_gfx_clk",
 				"gpu_cc_ahb_clk",
 				"gpu_cc_cx_gmu_clk";
-		clocks = <&clock_gcc GCC_GPU_CFG_AHB_CLK>,
-			<&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+		clocks = <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
 			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
-			<&clock_gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
 			<&clock_gpucc GPU_CC_AHB_CLK>,
 			<&clock_gpucc GPU_CC_CX_GMU_CLK>;
 		attach-impl-defs =
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 7c496f1..15db8da 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -136,6 +136,7 @@
 			interrupt-names = "eoc-int-en-set";
 			qcom,adc-bit-resolution = <15>;
 			qcom,adc-vdd-reference = <1875>;
+			#thermal-sensor-cells = <1>;
 
 			chan@6 {
 				label = "die_temp";
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 539685a..923804f 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -232,6 +232,25 @@
 						 <12000 2250>; /* 12V @ 2.25A */
 		};
 
+		bcl_sensor: bcl@4200 {
+			compatible = "qcom,msm-bcl-lmh";
+			reg = <0x4200 0xff>,
+				<0x4300 0xff>;
+			reg-names = "fg_user_adc",
+					"fg_lmh";
+			interrupts = <0x2 0x42 0x0 IRQ_TYPE_NONE>,
+					<0x2 0x42 0x1 IRQ_TYPE_NONE>,
+					<0x2 0x42 0x2 IRQ_TYPE_NONE>,
+					<0x2 0x42 0x3 IRQ_TYPE_NONE>,
+					<0x2 0x42 0x4 IRQ_TYPE_NONE>;
+			interrupt-names = "bcl-high-ibat",
+						"bcl-very-high-ibat",
+						"bcl-low-vbat",
+						"bcl-very-low-vbat",
+						"bcl-crit-low-vbat";
+			#thermal-sensor-cells = <1>;
+		};
+
 		pmi8998_rradc: rradc@4500 {
 			compatible = "qcom,rradc";
 			reg = <0x4500 0x100>;
@@ -651,3 +670,130 @@
 		};
 	};
 };
+
+&thermal_zones {
+	ibat-high {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&bcl_sensor 0>;
+
+		trips {
+			ibat_high: low-ibat {
+				temperature = <4200>;
+				hysteresis = <200>;
+				type = "passive";
+			};
+		};
+	};
+	ibat-vhigh {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "step_wise";
+		thermal-sensors = <&bcl_sensor 1>;
+
+		trips {
+			ibat_vhigh: ibat_vhigh {
+				temperature = <4300>;
+				hysteresis = <100>;
+				type = "passive";
+			};
+		};
+	};
+	vbat {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&bcl_sensor 2>;
+		tracks-low;
+
+		trips {
+			low_vbat: low-vbat {
+				temperature = <3300>;
+				hysteresis = <100>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			vbat_cpu4 {
+				trip = <&low_vbat>;
+				cooling-device = <&CPU4 22 22>;
+			};
+			vbat_cpu5 {
+				trip = <&low_vbat>;
+				cooling-device = <&CPU5 22 22>;
+			};
+			vbat_map6 {
+				trip = <&low_vbat>;
+				cooling-device = <&CPU6 22 22>;
+			};
+			vbat_map7 {
+				trip = <&low_vbat>;
+				cooling-device = <&CPU7 22 22>;
+			};
+		};
+	};
+	vbat_low {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&bcl_sensor 3>;
+		tracks-low;
+
+		trips {
+			low-vbat {
+				temperature = <3100>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
+	vbat_too_low {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&bcl_sensor 4>;
+		tracks-low;
+
+		trips {
+			low-vbat {
+				temperature = <2900>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+	};
+	soc {
+		polling-delay-passive = <100>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_cap";
+		thermal-sensors = <&bcl_sensor 5>;
+		tracks-low;
+
+		trips {
+			low_soc: low-soc {
+				temperature = <10>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			soc_cpu4 {
+				trip = <&low_soc>;
+				cooling-device = <&CPU4 22 22>;
+			};
+			soc_cpu5 {
+				trip = <&low_soc>;
+				cooling-device = <&CPU5 22 22>;
+			};
+			soc_map6 {
+				trip = <&low_soc>;
+				cooling-device = <&CPU6 22 22>;
+			};
+			soc_map7 {
+				trip = <&low_soc>;
+				cooling-device = <&CPU7 22 22>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index fce1687..c0189a4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -14,7 +14,7 @@
 &soc {
 	led_flash_rear: qcom,camera-flash@0 {
 		cell-index = <0>;
-		reg = <0x0>;
+		reg = <0x00 0x00>;
 		compatible = "qcom,camera-flash";
 		qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
 		qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
@@ -24,7 +24,7 @@
 
 	led_flash_front: qcom,camera-flash@1 {
 		cell-index = <1>;
-		reg = <0x1>;
+		reg = <0x01 0x00>;
 		compatible = "qcom,camera-flash";
 		qcom,flash-source = <&pmi8998_flash2>;
 		qcom,torch-source = <&pmi8998_torch2>;
@@ -34,7 +34,7 @@
 
 	actuator_regulator: gpio-regulator@0 {
 		compatible = "regulator-fixed";
-		reg = <0x0>;
+		reg = <0x00 0x00>;
 		regulator-name = "actuator_regulator";
 		regulator-min-microvolt = <2800000>;
 		regulator-max-microvolt = <2800000>;
@@ -46,7 +46,7 @@
 
 	camera_rear_ldo: gpio-regulator@1 {
 		compatible = "regulator-fixed";
-		reg = <0x1>;
+		reg = <0x01 0x00>;
 		regulator-name = "camera_rear_ldo";
 		regulator-min-microvolt = <1050000>;
 		regulator-max-microvolt = <1050000>;
@@ -60,7 +60,7 @@
 
 	camera_ldo: gpio-regulator@2 {
 		compatible = "regulator-fixed";
-		reg = <0x2>;
+		reg = <0x02 0x00>;
 		regulator-name = "camera_ldo";
 		regulator-min-microvolt = <1050000>;
 		regulator-max-microvolt = <1050000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index fce1687..c0189a4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -14,7 +14,7 @@
 &soc {
 	led_flash_rear: qcom,camera-flash@0 {
 		cell-index = <0>;
-		reg = <0x0>;
+		reg = <0x00 0x00>;
 		compatible = "qcom,camera-flash";
 		qcom,flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
 		qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
@@ -24,7 +24,7 @@
 
 	led_flash_front: qcom,camera-flash@1 {
 		cell-index = <1>;
-		reg = <0x1>;
+		reg = <0x01 0x00>;
 		compatible = "qcom,camera-flash";
 		qcom,flash-source = <&pmi8998_flash2>;
 		qcom,torch-source = <&pmi8998_torch2>;
@@ -34,7 +34,7 @@
 
 	actuator_regulator: gpio-regulator@0 {
 		compatible = "regulator-fixed";
-		reg = <0x0>;
+		reg = <0x00 0x00>;
 		regulator-name = "actuator_regulator";
 		regulator-min-microvolt = <2800000>;
 		regulator-max-microvolt = <2800000>;
@@ -46,7 +46,7 @@
 
 	camera_rear_ldo: gpio-regulator@1 {
 		compatible = "regulator-fixed";
-		reg = <0x1>;
+		reg = <0x01 0x00>;
 		regulator-name = "camera_rear_ldo";
 		regulator-min-microvolt = <1050000>;
 		regulator-max-microvolt = <1050000>;
@@ -60,7 +60,7 @@
 
 	camera_ldo: gpio-regulator@2 {
 		compatible = "regulator-fixed";
-		reg = <0x2>;
+		reg = <0x02 0x00>;
 		regulator-name = "camera_ldo";
 		regulator-min-microvolt = <1050000>;
 		regulator-max-microvolt = <1050000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index e98a973..6f35bf7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -267,6 +267,7 @@
 		qcom,scale-function = <4>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@4d {
@@ -278,6 +279,7 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@4f {
@@ -289,6 +291,7 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@51 {
@@ -300,6 +303,7 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 };
 
@@ -313,48 +317,30 @@
 		qcom,hw-settle-time = <0>;
 		qcom,btm-channel-number = <0x60>;
 	};
+};
 
-	chan@4d {
-		label = "msm_therm";
-		reg = <0x4d>;
-		qcom,pre-div-channel-scaling = <0>;
-		qcom,calibration-type = "ratiometric";
-		qcom,scale-function = <2>;
-		qcom,hw-settle-time = <2>;
-		qcom,btm-channel-number = <0x68>;
-		qcom,thermal-node;
+&thermal_zones {
+	xo-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_vadc 0x4c>;
 	};
 
-	chan@51 {
-		label = "quiet_therm";
-		reg = <0x51>;
-		qcom,pre-div-channel-scaling = <0>;
-		qcom,calibration-type = "ratiometric";
-		qcom,scale-function = <2>;
-		qcom,hw-settle-time = <2>;
-		qcom,btm-channel-number = <0x70>;
-		qcom,thermal-node;
+	msm-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_vadc 0x4d>;
 	};
 
-	chan@4c {
-		label = "xo_therm";
-		reg = <0x4c>;
-		qcom,pre-div-channel-scaling = <0>;
-		qcom,calibration-type = "ratiometric";
-		qcom,scale-function = <4>;
-		qcom,hw-settle-time = <2>;
-		qcom,btm-channel-number = <0x78>;
-		qcom,thermal-node;
+	pa-therm1-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_vadc 0x4f>;
 	};
 
-	chan@4f {
-		label = "pa_therm1";
-		reg = <0x4f>;
-		qcom,pre-div-channel-scaling = <0>;
-		qcom,calibration-type = "ratiometric";
-		qcom,scale-function = <2>;
-		qcom,hw-settle-time = <2>;
-		qcom,btm-channel-number = <0x80>;
-		qcom,thermal-node;
+	quiet-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_vadc 0x51>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 3f2317a..bfbaabb 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -74,6 +74,7 @@
 		qcom,gpu-qdss-stm = <0x161c0000 0x40000>; // base addr, size
 
 		qcom,tsens-name = "tsens_tz_sensor12";
+		#cooling-cells = <2>;
 
 		clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK>,
 			<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 6a4f19a..faa0b5e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -245,6 +245,7 @@
 		qcom,scale-function = <4>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@4d {
@@ -256,6 +257,7 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@4f {
@@ -267,6 +269,7 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 
 	chan@51 {
@@ -278,6 +281,7 @@
 		qcom,scale-function = <2>;
 		qcom,hw-settle-time = <2>;
 		qcom,fast-avg-setup = <0>;
+		qcom,vadc-thermal-node;
 	};
 };
 
@@ -291,48 +295,30 @@
 		qcom,hw-settle-time = <0>;
 		qcom,btm-channel-number = <0x60>;
 	};
+};
 
-	chan@4d {
-		label = "msm_therm";
-		reg = <0x4d>;
-		qcom,pre-div-channel-scaling = <0>;
-		qcom,calibration-type = "ratiometric";
-		qcom,scale-function = <2>;
-		qcom,hw-settle-time = <2>;
-		qcom,btm-channel-number = <0x68>;
-		qcom,thermal-node;
+&thermal_zones {
+	xo-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_vadc 0x4c>;
 	};
 
-	chan@51 {
-		label = "quiet_therm";
-		reg = <0x51>;
-		qcom,pre-div-channel-scaling = <0>;
-		qcom,calibration-type = "ratiometric";
-		qcom,scale-function = <2>;
-		qcom,hw-settle-time = <2>;
-		qcom,btm-channel-number = <0x70>;
-		qcom,thermal-node;
+	msm-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_vadc 0x4d>;
 	};
 
-	chan@4c {
-		label = "xo_therm";
-		reg = <0x4c>;
-		qcom,pre-div-channel-scaling = <0>;
-		qcom,calibration-type = "ratiometric";
-		qcom,scale-function = <4>;
-		qcom,hw-settle-time = <2>;
-		qcom,btm-channel-number = <0x78>;
-		qcom,thermal-node;
+	pa-therm1-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_vadc 0x4f>;
 	};
 
-	chan@4f {
-		label = "pa_therm1";
-		reg = <0x4f>;
-		qcom,pre-div-channel-scaling = <0>;
-		qcom,calibration-type = "ratiometric";
-		qcom,scale-function = <2>;
-		qcom,hw-settle-time = <2>;
-		qcom,btm-channel-number = <0x80>;
-		qcom,thermal-node;
+	quiet-therm-adc {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm8998_vadc 0x51>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 1c31a7a..dd0d08e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/msm/msm-bus-ids.h>
+
 &soc {
 	/* QUPv3 South instances */
 
@@ -30,6 +32,7 @@
 		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
 		interrupts = <GIC_SPI 607 0>;
 		status = "disabled";
+		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
 	};
 
 	qupv3_se7_4uart: qcom,qup_uart@0x89c000 {
@@ -45,6 +48,7 @@
 		pinctrl-1 = <&qupv3_se7_4uart_sleep>;
 		interrupts = <GIC_SPI 608 0>;
 		status = "disabled";
+		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_1>;
 	};
 
 	/* I2C */
@@ -336,6 +340,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se9_2uart_active>;
 		pinctrl-1 = <&qupv3_se9_2uart_sleep>;
+		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_2>;
 		interrupts = <GIC_SPI 354 0>;
 		status = "disabled";
 	};
@@ -353,6 +358,7 @@
 		pinctrl-0 = <&qupv3_se10_2uart_active>;
 		pinctrl-1 = <&qupv3_se10_2uart_sleep>;
 		interrupts = <GIC_SPI 355 0>;
+		qcom,bus-mas = <MSM_BUS_MASTER_BLSP_2>;
 		status = "disabled";
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 3e00577..2f45c41 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -23,15 +23,12 @@
 			<&clock_gcc GCC_DISP_AXI_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_AXI_CLK>,
-			<&clock_dispcc DISP_CC_MDSS_MDP_CLK_SRC>,
-			<&clock_dispcc DISP_CC_MDSS_VSYNC_CLK_SRC>,
 			<&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
-		clock-names = "gcc_iface", "gcc_bus",
-			"iface_clk", "bus_clk", "core_clk_src",
-			"vsync_clk_src", "core_clk", "vsync_clk";
-		clock-rate = <0 0 0 0 300000000 0 300000000 0 0>;
-		clock-max-rate = <0 0 0 0 430000000 0 430000000 0 0>;
+		clock-names = "gcc_iface", "gcc_bus", "iface_clk",
+				"bus_clk", "core_clk", "vsync_clk";
+		clock-rate = <0 0 0 0 300000000 19200000 0>;
+		clock-max-rate = <0 0 0 0 430000000 19200000 0>;
 
 		sde-vdd-supply = <&mdss_core_gdsc>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index d6ef67d..b9dc816 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -49,6 +49,8 @@
 		qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x1 0x0>;
 		qcom,allowed-clock-rates = <200000000 320000000 380000000
 			444000000 533000000>;
+		qcom,max-hq-mbs-per-frame = <8160>;
+		qcom,max-hq-frames-per-sec = <60>;
 		qcom,clock-freq-tbl {
 			qcom,profile-enc {
 				qcom,codec-mask = <0x55555555>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 24eeeec..f6297aa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -22,6 +22,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/soc/qcom,tcs-mbox.h>
 #include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845";
@@ -47,6 +48,8 @@
 			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_0>;
 			L2_0: l2-cache {
 			      compatible = "arm,arch-cache";
@@ -78,6 +81,8 @@
 			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_100>;
 			L2_100: l2-cache {
 			      compatible = "arm,arch-cache";
@@ -103,6 +108,8 @@
 			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_200>;
 			L2_200: l2-cache {
 			      compatible = "arm,arch-cache";
@@ -128,6 +135,8 @@
 			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
+			qcom,lmh-dcvs = <&lmh_dcvs0>;
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_300>;
 			L2_300: l2-cache {
 			      compatible = "arm,arch-cache";
@@ -153,6 +162,8 @@
 			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_400>;
 			L2_400: l2-cache {
 			      compatible = "arm,arch-cache";
@@ -178,6 +189,8 @@
 			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_500>;
 			L2_500: l2-cache {
 			      compatible = "arm,arch-cache";
@@ -203,6 +216,8 @@
 			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_600>;
 			L2_600: l2-cache {
 			      compatible = "arm,arch-cache";
@@ -228,6 +243,8 @@
 			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
+			qcom,lmh-dcvs = <&lmh_dcvs1>;
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_700>;
 			L2_700: l2-cache {
 			      compatible = "arm,arch-cache";
@@ -2052,6 +2069,7 @@
 		qcom,rmnet-ipa-ssr;
 		qcom,ipa-loaduC;
 		qcom,ipa-advertise-sg-support;
+		qcom,ipa-napi-enable;
 	};
 
 	ipa_hw: qcom,ipa@01e00000 {
@@ -2070,6 +2088,8 @@
 		qcom,modem-cfg-emb-pipe-flt;
 		qcom,ipa-wdi2;
 		qcom,use-64-bit-dma-mask;
+		qcom,arm-smmu;
+		qcom,smmu-s1-bypass;
 		qcom,bandwidth-vote-for-ipa;
 		qcom,msm-bus,name = "ipa";
 		qcom,msm-bus,num-cases = <4>;
@@ -2149,26 +2169,28 @@
 			0x0	/* modem_comp_decomp_ofst; diff */
 			0x0	/* modem_comp_decomp_size; diff */
 			0xbd8	/* modem_ofst; */
-			0x1424	/* modem_size; */
-			0x1ffc	/* apps_v4_flt_hash_ofst; */
+			0x1024	/* modem_size; */
+			0x2000	/* apps_v4_flt_hash_ofst; */
 			0x0	/* apps_v4_flt_hash_size; */
-			0x1ffc	/* apps_v4_flt_nhash_ofst; */
+			0x2000	/* apps_v4_flt_nhash_ofst; */
 			0x0	/* apps_v4_flt_nhash_size; */
-			0x1ffc	/* apps_v6_flt_hash_ofst; */
+			0x2000	/* apps_v6_flt_hash_ofst; */
 			0x0	/* apps_v6_flt_hash_size; */
-			0x1ffc	/* apps_v6_flt_nhash_ofst; */
+			0x2000	/* apps_v6_flt_nhash_ofst; */
 			0x0	/* apps_v6_flt_nhash_size; */
 			0x80	/* uc_info_ofst; */
 			0x200	/* uc_info_size; */
 			0x2000	/* end_ofst; */
-			0x1ffc	/* apps_v4_rt_hash_ofst; */
+			0x2000	/* apps_v4_rt_hash_ofst; */
 			0x0	/* apps_v4_rt_hash_size; */
-			0x1ffc	/* apps_v4_rt_nhash_ofst; */
+			0x2000	/* apps_v4_rt_nhash_ofst; */
 			0x0	/* apps_v4_rt_nhash_size; */
-			0x1ffc	/* apps_v6_rt_hash_ofst; */
+			0x2000	/* apps_v6_rt_hash_ofst; */
 			0x0	/* apps_v6_rt_hash_size; */
-			0x1ffc	/* apps_v6_rt_nhash_ofst; */
+			0x2000	/* apps_v6_rt_nhash_ofst; */
 			0x0	/* apps_v6_rt_nhash_size; */
+			0x1c00	/* uc_event_ring_ofst; */
+			0x400	/* uc_event_ring_size; */
 		>;
 
 		/* smp2p gpio information */
@@ -2181,6 +2203,23 @@
 			compatible = "qcom,smp2pgpio-map-ipa-1-in";
 			gpios = <&smp2pgpio_ipa_1_in 0 0>;
 		};
+
+		ipa_smmu_ap: ipa_smmu_ap {
+			compatible = "qcom,ipa-smmu-ap-cb";
+			iommus = <&apps_smmu 0x720>;
+			qcom,iova-mapping = <0x20000000 0x40000000>;
+		};
+
+		ipa_smmu_wlan: ipa_smmu_wlan {
+			compatible = "qcom,ipa-smmu-wlan-cb";
+			iommus = <&apps_smmu 0x721>;
+		};
+
+		ipa_smmu_uc: ipa_smmu_uc {
+			compatible = "qcom,ipa-smmu-uc-cb";
+			iommus = <&apps_smmu 0x722>;
+			qcom,iova-mapping = <0x40000000 0x20000000>;
+		};
 	};
 
 	qcom,ipa_fws {
@@ -2266,7 +2305,7 @@
 		aoss0-ts0-h {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			thermal-sensors = <&tsens0 0>;
 			trips {
 				active-config0 {
@@ -2280,7 +2319,7 @@
 		cpu0-silver-ts0-h {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			thermal-sensors = <&tsens0 1>;
 			trips {
 				active-config0 {
@@ -2294,7 +2333,7 @@
 		cpu1-silver-ts0-h {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			thermal-sensors = <&tsens0 2>;
 			trips {
 				active-config0 {
@@ -2308,7 +2347,7 @@
 		cpu2-silver-ts0-h {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			thermal-sensors = <&tsens0 3>;
 			trips {
 				active-config0 {
@@ -2323,7 +2362,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 4>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2337,7 +2376,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 5>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2351,7 +2390,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 6>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2365,7 +2404,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 7>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2379,7 +2418,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 8>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2393,7 +2432,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 9>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2407,7 +2446,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 10>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2421,7 +2460,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens0 11>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2434,7 +2473,7 @@
 		gpu1-ts0-h {
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			thermal-sensors = <&tsens0 12>;
 			trips {
 				active-config0 {
@@ -2449,7 +2488,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 0>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2463,7 +2502,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 1>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2479,7 +2518,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 2>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2493,7 +2532,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 3>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2507,7 +2546,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 4>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2521,7 +2560,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 5>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2535,7 +2574,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 6>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2549,7 +2588,7 @@
 			polling-delay-passive = <0>;
 			polling-delay = <0>;
 			thermal-sensors = <&tsens1 7>;
-			thermal-governor = "step_wise";
+			thermal-governor = "user_space";
 			trips {
 				active-config0 {
 					temperature = <65000>;
@@ -2558,6 +2597,709 @@
 				};
 			};
 		};
+
+		gpu0 {
+			polling-delay-passive = <10>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 11>;
+			thermal-governor = "step_wise";
+			trips {
+				gpu0_trip: gpu0-trip {
+					temperature = <95000>;
+					hysteresis = <0>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				gpu0_cdev {
+					trip = <&gpu0_trip>;
+					cooling-device =
+						<&msm_gpu 1 THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		gpu1 {
+			polling-delay-passive = <10>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens0 12>;
+			thermal-governor = "step_wise";
+			trips {
+				gpu1_trip: gpu1-trip {
+					temperature = <95000>;
+					hysteresis = <0>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				gpu1_cdev {
+					trip = <&gpu1_trip>;
+					cooling-device =
+						<&msm_gpu 1 THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		pop-mem {
+			polling-delay-passive = <10>;
+			polling-delay = <0>;
+			thermal-sensors = <&tsens1 2>;
+			thermal-governor = "step_wise";
+			trips {
+				pop_trip: pop-trip {
+					temperature = <95000>;
+					hysteresis = <0>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				pop_cdev {
+					trip = <&pop_trip>;
+					cooling-device =
+						<&CPU4 1 THERMAL_NO_LIMIT>;
+				};
+			};
+		};
+
+		aoss0-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 0>;
+			tracks-low;
+			trips {
+				aoss0_trip: aoss0-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&aoss0_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&aoss0_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&aoss0_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		cpu0-silver-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 1>;
+			tracks-low;
+			trips {
+				cpu0_trip: cpu0-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&cpu0_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&cpu0_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&cpu0_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		cpu1-silver-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 2>;
+			tracks-low;
+			trips {
+				cpu1_trip: cpu1-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&cpu1_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&cpu1_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&cpu1_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		cpu2-silver-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 3>;
+			tracks-low;
+			trips {
+				cpu2_trip: cpu2-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&cpu2_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&cpu2_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&cpu2_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		cpu3-silver-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 4>;
+			tracks-low;
+			trips {
+				cpu3_trip: cpu3-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&cpu3_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&cpu3_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&cpu3_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		kryo-l3-0-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 5>;
+			tracks-low;
+			trips {
+				l3_0_trip: l3-0-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&l3_0_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&l3_0_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&l3_0_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		kryo-l3-1-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 6>;
+			tracks-low;
+			trips {
+				l3_1_trip: l3-1-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&l3_1_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&l3_1_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&l3_1_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		cpu0-gold-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 7>;
+			tracks-low;
+			trips {
+				cpug0_trip: cpug0-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&cpug0_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&cpug0_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&cpug0_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		cpu1-gold-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 8>;
+			tracks-low;
+			trips {
+				cpug1_trip: cpug1-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&cpug1_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&cpug1_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&cpug1_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		cpu2-gold-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 9>;
+			tracks-low;
+			trips {
+				cpug2_trip: cpug2-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&cpug2_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&cpug2_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&cpug2_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		cpu3-gold-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 9>;
+			tracks-low;
+			trips {
+				cpug3_trip: cpug3-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&cpug3_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&cpug3_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&cpug3_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		gpu0-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 10>;
+			tracks-low;
+			trips {
+				gpu0_trip_l: gpu0-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&gpu0_trip_l>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&gpu0_trip_l>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&gpu0_trip_l>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		gpu1-ts0-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens0 11>;
+			tracks-low;
+			trips {
+				gpu1_trip_l: gpu1-trip_l {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&gpu1_trip_l>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&gpu1_trip_l>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&gpu1_trip_l>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		aoss1-ts1-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens1 0>;
+			tracks-low;
+			trips {
+				aoss1_trip: aoss1-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&aoss1_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&aoss1_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&aoss1_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		mdm-dsp-ts1-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens1 1>;
+			tracks-low;
+			trips {
+				dsp_trip: dsp-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&dsp_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&dsp_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&dsp_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		ddr-ts1-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens1 2>;
+			tracks-low;
+			trips {
+				ddr_trip: ddr-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&ddr_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&ddr_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&ddr_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		wlan-ts1-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens1 3>;
+			tracks-low;
+			trips {
+				wlan_trip: wlan-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&wlan_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&wlan_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&wlan_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		compute-hvx-ts1-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens1 4>;
+			tracks-low;
+			trips {
+				hvx_trip: hvx-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&hvx_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&hvx_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&hvx_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		camera-ts1-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens1 5>;
+			tracks-low;
+			trips {
+				camera_trip: camera-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&camera_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&camera_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&camera_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		mmss-ts1-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens1 6>;
+			tracks-low;
+			trips {
+				mmss_trip: mmss-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&mmss_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&mmss_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&mmss_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		mdm-core-ts1-l {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "low_limits_floor";
+			thermal-sensors = <&tsens1 7>;
+			tracks-low;
+			trips {
+				mdm_trip: mdm-trip {
+					temperature = <5000>;
+					hysteresis = <5000>;
+					type = "passive";
+				};
+			};
+			cooling-maps {
+				cpu0_vdd_cdev {
+					trip = <&mdm_trip>;
+					cooling-device = <&CPU0 12 12>;
+				};
+				cpu4_vdd_cdev {
+					trip = <&mdm_trip>;
+					cooling-device = <&CPU4 12 12>;
+				};
+				gpu_vdd_cdev {
+					trip = <&mdm_trip>;
+					cooling-device = <&msm_gpu 4 4>;
+				};
+			};
+		};
+
+		lmh-dcvs-01 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "user_space";
+			thermal-sensors = <&lmh_dcvs1>;
+
+			trips {
+				active-config {
+					temperature = <95000>;
+					hysteresis = <30000>;
+					type = "passive";
+				};
+			};
+		};
+
+		lmh-dcvs-00 {
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			thermal-governor = "user_space";
+			thermal-sensors = <&lmh_dcvs0>;
+
+			trips {
+				active-config {
+					temperature = <95000>;
+					hysteresis = <30000>;
+					type = "passive";
+				};
+			};
+		};
+
 	};
 
 	tsens0: tsens@c222000 {
@@ -2583,6 +3325,22 @@
 	};
 };
 
+&clock_cpucc {
+	lmh_dcvs0: qcom,limits-dcvs@0 {
+		compatible = "qcom,msm-hw-limits";
+		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,affinity = <0>;
+		#thermal-sensor-cells = <0>;
+	};
+
+	lmh_dcvs1: qcom,limits-dcvs@1 {
+		compatible = "qcom,msm-hw-limits";
+		interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,affinity = <1>;
+		#thermal-sensor-cells = <0>;
+	};
+};
+
 &pcie_0_gdsc {
 	status = "ok";
 };
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 852c01f..658c871 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -301,9 +301,17 @@
 CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
@@ -422,6 +430,7 @@
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_CLOCK_CPU_OSM=y
 CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
 CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MSM_QMP=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index a78c60d..7507c3b 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -311,9 +311,17 @@
 CONFIG_QPNP_QNOVO=y
 CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
 CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
@@ -439,6 +447,7 @@
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_CLOCK_CPU_OSM=y
 CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_MSM_CLK_AOP_QMP=y
 CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MSM_QMP=y
@@ -524,10 +533,12 @@
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_PANIC_ON=y
 CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_FREE=y
 CONFIG_DEBUG_OBJECTS_TIMERS=y
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 3b40f26..aaf4bd7 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -394,7 +394,7 @@
 {
 	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];
 
-	if (!sge) {
+	if (sched_is_energy_aware() && !sge) {
 		pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
 		return NULL;
 	}
@@ -407,7 +407,7 @@
 {
 	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];
 
-	if (!sge) {
+	if (sched_is_energy_aware() && !sge) {
 		pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
 		return NULL;
 	}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 0ddf369..8ac0e59 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -269,11 +269,6 @@
 DEFINE_HWx_IRQDISPATCH(5)
 #endif
 
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
-	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
 #ifdef CONFIG_MIPS_MT_SMP
 void __init arch_init_ipiirq(int irq, struct irqaction *action)
 {
@@ -318,19 +313,23 @@
 asmlinkage void plat_irq_dispatch(void)
 {
 	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-	int irq;
+	unsigned int i;
 
-	if (!pending) {
-		spurious_interrupt();
-		return;
+	if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
+		do_IRQ(MIPS_CPU_TIMER_IRQ);
+		goto out;
+	} else {
+		for (i = 0; i < MAX_IM; i++) {
+			if (pending & (CAUSEF_IP2 << i)) {
+				ltq_hw_irqdispatch(i);
+				goto out;
+			}
+		}
 	}
+	pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status());
 
-	pending >>= CAUSEB_IP;
-	while (pending) {
-		irq = fls(pending) - 1;
-		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
-		pending &= ~BIT(irq);
-	}
+out:
+	return;
 }
 
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
@@ -355,6 +354,11 @@
 	.map = icu_map,
 };
 
+static struct irqaction cascade = {
+	.handler = no_action,
+	.name = "cascade",
+};
+
 int __init icu_of_init(struct device_node *node, struct device_node *parent)
 {
 	struct device_node *eiu_node;
@@ -386,7 +390,7 @@
 	mips_cpu_irq_init();
 
 	for (i = 0; i < MAX_IM; i++)
-		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
+		setup_irq(i + 2, &cascade);
 
 	if (cpu_has_vint) {
 		pr_info("Setting up vectored interrupts\n");
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 7fcf512..0497cec 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -42,10 +42,10 @@
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr)		__get_user_asm64(ptr)
+#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
 #define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
 #else
-#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
 #define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
 #endif
 
@@ -100,63 +100,87 @@
 		" mtsp %0,%%sr2\n\t"		\
 		: : "r"(get_fs()) : )
 
-#define __get_user(x, ptr)                               \
-({                                                       \
-	register long __gu_err __asm__ ("r8") = 0;       \
-	register long __gu_val;				 \
-							 \
-	load_sr2();					 \
-	switch (sizeof(*(ptr))) {			 \
-	    case 1: __get_user_asm("ldb", ptr); break;   \
-	    case 2: __get_user_asm("ldh", ptr); break;   \
-	    case 4: __get_user_asm("ldw", ptr); break;   \
-	    case 8: LDD_USER(ptr);  break;		 \
-	    default: BUILD_BUG(); break;		 \
-	}                                                \
-							 \
-	(x) = (__force __typeof__(*(ptr))) __gu_val;	 \
-	__gu_err;                                        \
+#define __get_user_internal(val, ptr)			\
+({							\
+	register long __gu_err __asm__ ("r8") = 0;	\
+							\
+	switch (sizeof(*(ptr))) {			\
+	case 1: __get_user_asm(val, "ldb", ptr); break;	\
+	case 2: __get_user_asm(val, "ldh", ptr); break; \
+	case 4: __get_user_asm(val, "ldw", ptr); break; \
+	case 8: LDD_USER(val, ptr); break;		\
+	default: BUILD_BUG();				\
+	}						\
+							\
+	__gu_err;					\
 })
 
-#define __get_user_asm(ldx, ptr)                        \
+#define __get_user(val, ptr)				\
+({							\
+	load_sr2();					\
+	__get_user_internal(val, ptr);			\
+})
+
+#define __get_user_asm(val, ldx, ptr)			\
+{							\
+	register long __gu_val;				\
+							\
 	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
 		"9:\n"					\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)        \
-		: "r"(ptr), "1"(__gu_err));
+		: "r"(ptr), "1"(__gu_err));		\
+							\
+	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
+}
 
 #if !defined(CONFIG_64BIT)
 
-#define __get_user_asm64(ptr) 				\
+#define __get_user_asm64(val, ptr)			\
+{							\
+	union {						\
+		unsigned long long	l;		\
+		__typeof__(*(ptr))	t;		\
+	} __gu_tmp;					\
+							\
 	__asm__("   copy %%r0,%R0\n"			\
 		"1: ldw 0(%%sr2,%2),%0\n"		\
 		"2: ldw 4(%%sr2,%2),%R0\n"		\
 		"9:\n"					\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
-		: "=r"(__gu_val), "=r"(__gu_err)	\
-		: "r"(ptr), "1"(__gu_err));
+		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
+		: "r"(ptr), "1"(__gu_err));		\
+							\
+	(val) = __gu_tmp.t;				\
+}
 
 #endif /* !defined(CONFIG_64BIT) */
 
 
-#define __put_user(x, ptr)                                      \
+#define __put_user_internal(x, ptr)				\
 ({								\
 	register long __pu_err __asm__ ("r8") = 0;      	\
         __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
 								\
-	load_sr2();						\
 	switch (sizeof(*(ptr))) {				\
-	    case 1: __put_user_asm("stb", __x, ptr); break;     \
-	    case 2: __put_user_asm("sth", __x, ptr); break;     \
-	    case 4: __put_user_asm("stw", __x, ptr); break;     \
-	    case 8: STD_USER(__x, ptr); break;			\
-	    default: BUILD_BUG(); break;			\
-	}                                                       \
+	case 1: __put_user_asm("stb", __x, ptr); break;		\
+	case 2: __put_user_asm("sth", __x, ptr); break;		\
+	case 4: __put_user_asm("stw", __x, ptr); break;		\
+	case 8: STD_USER(__x, ptr); break;			\
+	default: BUILD_BUG();					\
+	}							\
 								\
 	__pu_err;						\
 })
 
+#define __put_user(x, ptr)					\
+({								\
+	load_sr2();						\
+	__put_user_internal(x, ptr);				\
+})
+
+
 /*
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index f01188c..85c28bb 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -201,7 +201,7 @@
 	add	dst,len,end
 
 	/* short copy with less than 16 bytes? */
-	cmpib,>>=,n 15,len,.Lbyte_loop
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 	/* same alignment? */
 	xor	src,dst,t0
@@ -216,7 +216,7 @@
 	/* loop until we are 64-bit aligned */
 .Lalign_loop64:
 	extru	dst,31,3,t1
-	cmpib,=,n	0,t1,.Lcopy_loop_16
+	cmpib,=,n	0,t1,.Lcopy_loop_16_start
 20:	ldb,ma	1(srcspc,src),t1
 21:	stb,ma	t1,1(dstspc,dst)
 	b	.Lalign_loop64
@@ -225,6 +225,7 @@
 	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
 	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
+.Lcopy_loop_16_start:
 	ldi	31,t0
 .Lcopy_loop_16:
 	cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@
 	/* loop until we are 32-bit aligned */
 .Lalign_loop32:
 	extru	dst,31,2,t1
-	cmpib,=,n	0,t1,.Lcopy_loop_4
+	cmpib,=,n	0,t1,.Lcopy_loop_8
 20:	ldb,ma	1(srcspc,src),t1
 21:	stb,ma	t1,1(dstspc,dst)
 	b	.Lalign_loop32
@@ -277,7 +278,7 @@
 	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
 
-.Lcopy_loop_4:
+.Lcopy_loop_8:
 	cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 10:	ldw	0(srcspc,src),t1
@@ -299,7 +300,7 @@
 	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
 	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
 
-	b	.Lcopy_loop_4
+	b	.Lcopy_loop_8
 	ldo	-16(len),len
 
 .Lbyte_loop:
@@ -324,7 +325,7 @@
 .Lunaligned_copy:
 	/* align until dst is 32bit-word-aligned */
 	extru	dst,31,2,t1
-	cmpib,COND(=),n	0,t1,.Lcopy_dstaligned
+	cmpib,=,n	0,t1,.Lcopy_dstaligned
 20:	ldb	0(srcspc,src),t1
 	ldo	1(src),src
 21:	stb,ma	t1,1(dstspc,dst)
@@ -362,7 +363,7 @@
 	cmpiclr,<> 1,t0,%r0
 	b,n .Lcase1
 .Lcase0:
-	cmpb,= %r0,len,.Lcda_finish
+	cmpb,COND(=) %r0,len,.Lcda_finish
 	nop
 
 1:	ldw,ma 4(srcspc,src), a3
@@ -376,7 +377,7 @@
 1:	ldw,ma 4(srcspc,src), a3
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
 	ldo -1(len),len
-	cmpb,=,n %r0,len,.Ldo0
+	cmpb,COND(=),n %r0,len,.Ldo0
 .Ldo4:
 1:	ldw,ma 4(srcspc,src), a0
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@
 1:	stw,ma t0, 4(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
 	ldo -4(len),len
-	cmpb,<> %r0,len,.Ldo4
+	cmpb,COND(<>) %r0,len,.Ldo4
 	nop
 .Ldo0:
 	shrpw a2, a3, %sar, t0
@@ -436,14 +437,14 @@
 	/* fault exception fixup handlers: */
 #ifdef CONFIG_64BIT
 .Lcopy16_fault:
-10:	b	.Lcopy_done
-	std,ma	t1,8(dstspc,dst)
+	b	.Lcopy_done
+10:	std,ma	t1,8(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 #endif
 
 .Lcopy8_fault:
-10:	b	.Lcopy_done
-	stw,ma	t1,4(dstspc,dst)
+	b	.Lcopy_done
+10:	stw,ma	t1,4(dstspc,dst)
 	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 
 	.exit
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 7853b53..3f9d1a8 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -30,8 +30,10 @@
 {
 	vdso32_enabled = simple_strtoul(s, NULL, 0);
 
-	if (vdso32_enabled > 1)
+	if (vdso32_enabled > 1) {
 		pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+		vdso32_enabled = 0;
+	}
 
 	return 1;
 }
@@ -62,13 +64,18 @@
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
 
+static const int zero;
+static const int one = 1;
+
 static struct ctl_table abi_table2[] = {
 	{
 		.procname	= "vsyscall32",
 		.data		= &vdso32_enabled,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= (int *)&zero,
+		.extra2		= (int *)&one,
 	},
 	{}
 };
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 81b321a..f924629 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -507,6 +507,9 @@
 		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
 		cpuc->lbr_entries[i].mispred	= 0;
 		cpuc->lbr_entries[i].predicted	= 0;
+		cpuc->lbr_entries[i].in_tx	= 0;
+		cpuc->lbr_entries[i].abort	= 0;
+		cpuc->lbr_entries[i].cycles	= 0;
 		cpuc->lbr_entries[i].reserved	= 0;
 	}
 	cpuc->lbr_stack.nr = i;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index e7f155c..94aad63 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -278,7 +278,7 @@
 
 #define	ARCH_DLINFO_IA32						\
 do {									\
-	if (vdso32_enabled) {						\
+	if (VDSO_CURRENT_BASE) {					\
 		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);			\
 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);	\
 	}								\
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 2c1ebeb..529bb4a 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -55,7 +55,8 @@
  * @size:	number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
@@ -69,15 +70,6 @@
 		clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-	return iter_is_iovec(i) == false;
-}
-
 /**
  * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr:	PMEM destination address
@@ -94,7 +86,35 @@
 	/* TODO: skip the write-back by always using non-temporal stores */
 	len = copy_from_iter_nocache(addr, bytes, i);
 
-	if (__iter_needs_pmem_wb(i))
+	/*
+	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
+	 * non-temporal stores for the bulk of the transfer, but we need
+	 * to manually flush if the transfer is unaligned. A cached
+	 * memory copy is used when destination or size is not naturally
+	 * aligned. That is:
+	 *   - Require 8-byte alignment when size is 8 bytes or larger.
+	 *   - Require 4-byte alignment when size is 4 bytes.
+	 *
+	 * In the non-iovec case the entire destination needs to be
+	 * flushed.
+	 */
+	if (iter_is_iovec(i)) {
+		unsigned long flushed, dest = (unsigned long) addr;
+
+		if (bytes < 8) {
+			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+				arch_wb_cache_pmem(addr, 1);
+		} else {
+			if (!IS_ALIGNED(dest, 8)) {
+				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+				arch_wb_cache_pmem(addr, 1);
+			}
+
+			flushed = dest - (unsigned long) addr;
+			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+				arch_wb_cache_pmem(addr + bytes - 1, 1);
+		}
+	} else
 		arch_wb_cache_pmem(addr, bytes);
 
 	return len;
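
The alignment comment added to arch_copy_from_iter_pmem() above is the core of this hunk: only the unaligned head and tail of an iovec transfer fall back to cached stores and therefore still need an explicit write-back after copy_from_iter_nocache(). A small standalone sketch of that decision follows; IS_ALIGNED/ALIGN/CLFLUSH_SIZE are re-defined locally purely for illustration (they stand in for the kernel macros and boot_cpu_data.x86_clflush_size), and the sketch only reports the decision rather than calling arch_wb_cache_pmem().

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel macros used in the hunk above. */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

/* Pretend cache line size; the patch uses boot_cpu_data.x86_clflush_size. */
#define CLFLUSH_SIZE	64

/* Report whether the head and/or tail of the copy needs a manual write-back. */
static void wb_decision(uintptr_t dest, size_t bytes)
{
	int head = 0, tail = 0;

	if (bytes < 8) {
		if (!IS_ALIGNED(dest, 4) || bytes != 4)
			head = 1;	/* small transfer used a cached copy */
	} else {
		uintptr_t flushed = 0;

		if (!IS_ALIGNED(dest, 8)) {
			flushed = ALIGN(dest, CLFLUSH_SIZE) - dest;
			head = 1;	/* unaligned head used cached stores */
		}
		if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
			tail = 1;	/* unaligned tail used cached stores */
	}

	printf("dest=%#lx len=%zu -> flush head:%d tail:%d\n",
	       (unsigned long)dest, bytes, head, tail);
}

int main(void)
{
	wb_decision(0x1000, 4096);	/* fully aligned: nothing to flush */
	wb_decision(0x1003, 4096);	/* unaligned start: flush the head */
	wb_decision(0x1000, 4093);	/* unaligned length: flush the tail */
	return 0;
}
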
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index ec1f756..71beb28 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -151,8 +151,8 @@
 
 				if (from->si_signo == SIGSEGV) {
 					if (from->si_code == SEGV_BNDERR) {
-						compat_uptr_t lower = (unsigned long)&to->si_lower;
-						compat_uptr_t upper = (unsigned long)&to->si_upper;
+						compat_uptr_t lower = (unsigned long)from->si_lower;
+						compat_uptr_t upper = (unsigned long)from->si_upper;
 						put_user_ex(lower, &to->si_lower);
 						put_user_ex(upper, &to->si_upper);
 					}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 69b8f8a..43b55ef 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6925,14 +6925,20 @@
 		}
 
 		page = nested_get_page(vcpu, vmptr);
-		if (page == NULL ||
-		    *(u32 *)kmap(page) != VMCS12_REVISION) {
+		if (page == NULL) {
 			nested_vmx_failInvalid(vcpu);
+			skip_emulated_instruction(vcpu);
+			return 1;
+		}
+		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
 			kunmap(page);
+			nested_release_page_clean(page);
+			nested_vmx_failInvalid(vcpu);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		}
 		kunmap(page);
+		nested_release_page_clean(page);
 		vmx->nested.vmxon_ptr = vmptr;
 		break;
 	case EXIT_REASON_VMCLEAR:
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 22af912..889e761 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -643,21 +643,40 @@
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+	if (page_is_ram(pagenr)) {
+		/*
+		 * For disallowed memory regions in the low 1MB range,
+		 * request that the page be shown as all zeros.
+		 */
+		if (pagenr < 256)
+			return 2;
+
 		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
+	}
+
+	/*
+	 * This must follow RAM test, since System RAM is considered a
+	 * restricted resource under CONFIG_STRICT_IOMEM.
+	 */
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+		/* Low 1MB bypasses iomem restrictions. */
+		if (pagenr < 256)
+			return 1;
+
+		return 0;
+	}
+
+	return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
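
The rewritten devmem_is_allowed() above introduces a three-way contract: 0 rejects the page, 1 allows real access, and 2 allows the mapping but asks callers to present the page as zeros. The drivers/char/mem.c hunk later in this series consumes exactly that contract via page_is_allowed(). Below is a hedged standalone sketch of a caller branching on it; the devmem_is_allowed() stub and its page-number thresholds are made up for illustration and do not reflect a real memory map.

#include <stdio.h>

/* Illustrative stub: 0 = reject, 1 = allow, 2 = allow but show zeros. */
static int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 2;	/* low 1MB RAM: mappable, reads back as zeros */
	if (pagenr < 1024)
		return 0;	/* other System RAM: rejected */
	return 1;		/* non-RAM (e.g. MMIO): real access allowed */
}

/* Mirrors the branching added to read_mem()/write_mem() in drivers/char/mem.c. */
static const char *access_mode(unsigned long pagenr)
{
	switch (devmem_is_allowed(pagenr)) {
	case 0:
		return "EPERM";
	case 2:
		return "zero-filled read, write silently dropped";
	default:
		return "real access";
	}
}

int main(void)
{
	printf("pfn 0x10:   %s\n", access_mode(0x10));
	printf("pfn 0x200:  %s\n", access_mode(0x200));
	printf("pfn 0x1000: %s\n", access_mode(0x1000));
	return 0;
}
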
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 30031d5..cdfe8c6 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -201,6 +201,10 @@
 		return;
 	}
 
+	/* No need to reserve regions that will never be freed. */
+	if (md.attribute & EFI_MEMORY_RUNTIME)
+		return;
+
 	size += addr % EFI_PAGE_SIZE;
 	size = round_up(size, EFI_PAGE_SIZE);
 	addr = round_down(addr, EFI_PAGE_SIZE);
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 44c88ad..bcea81f 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -145,7 +145,7 @@
 static int xen_cpu_present_to_apicid(int cpu)
 {
 	if (cpu_present(cpu))
-		return xen_get_apic_id(xen_apic_read(APIC_ID));
+		return cpu_data(cpu).apicid;
 	else
 		return BAD_APICID;
 }
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 2ce8bcb..cce0268 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -31,6 +31,7 @@
 	crypto_completion_t complete;
 	void *data;
 	u8 *result;
+	u32 flags;
 	void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -252,6 +253,8 @@
 	priv->result = req->result;
 	priv->complete = req->base.complete;
 	priv->data = req->base.data;
+	priv->flags = req->base.flags;
+
 	/*
 	 * WARNING: We do not backup req->priv here! The req->priv
 	 *          is for internal use of the Crypto API and the
@@ -266,38 +269,44 @@
 	return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
 	struct ahash_request_priv *priv = req->priv;
 
+	if (!err)
+		memcpy(priv->result, req->result,
+		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
 	/* Restore the original crypto request. */
 	req->result = priv->result;
-	req->base.complete = priv->complete;
-	req->base.data = priv->data;
+
+	ahash_request_set_callback(req, priv->flags,
+				   priv->complete, priv->data);
 	req->priv = NULL;
 
 	/* Free the req->priv.priv from the ADJUSTED request. */
 	kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
 	struct ahash_request_priv *priv = req->priv;
+	struct crypto_async_request oreq;
 
-	if (err == -EINPROGRESS)
-		return;
+	oreq.data = priv->data;
 
-	if (!err)
-		memcpy(priv->result, req->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-	ahash_restore_req(req);
+	priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
 	struct ahash_request *areq = req->data;
 
+	if (err == -EINPROGRESS) {
+		ahash_notify_einprogress(areq);
+		return;
+	}
+
 	/*
 	 * Restore the original request, see ahash_op_unaligned() for what
 	 * goes where.
@@ -308,7 +317,7 @@
 	 */
 
 	/* First copy req->result into req->priv.result */
-	ahash_op_unaligned_finish(areq, err);
+	ahash_restore_req(areq, err);
 
 	/* Complete the ORIGINAL request. */
 	areq->base.complete(&areq->base, err);
@@ -324,7 +333,12 @@
 		return err;
 
 	err = op(req);
-	ahash_op_unaligned_finish(req, err);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
+
+	ahash_restore_req(req, err);
 
 	return err;
 }
@@ -359,25 +373,14 @@
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
-{
-	struct ahash_request_priv *priv = req->priv;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (!err)
-		memcpy(priv->result, req->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-	ahash_restore_req(req);
-}
-
 static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
 	struct ahash_request *areq = req->data;
 
-	ahash_def_finup_finish2(areq, err);
+	if (err == -EINPROGRESS)
+		return;
+
+	ahash_restore_req(areq, err);
 
 	areq->base.complete(&areq->base, err);
 }
@@ -388,11 +391,15 @@
 		goto out;
 
 	req->base.complete = ahash_def_finup_done2;
-	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
 	err = crypto_ahash_reqtfm(req)->final(req);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
 
 out:
-	ahash_def_finup_finish2(req, err);
+	ahash_restore_req(req, err);
 	return err;
 }
 
@@ -400,7 +407,16 @@
 {
 	struct ahash_request *areq = req->data;
 
+	if (err == -EINPROGRESS) {
+		ahash_notify_einprogress(areq);
+		return;
+	}
+
+	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
 	err = ahash_def_finup_finish1(areq, err);
+	if (areq->priv)
+		return;
 
 	areq->base.complete(&areq->base, err);
 }
@@ -415,6 +431,11 @@
 		return err;
 
 	err = tfm->update(req);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
+
 	return ahash_def_finup_finish1(req, err);
 }
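
All of the ahash changes above follow one rule: the adjusted request may only be restored (and its private copy freed) once the operation has genuinely completed, so -EINPROGRESS, and -EBUSY when the request was queued with CRYPTO_TFM_REQ_MAY_BACKLOG, must be passed straight back to the caller and handled again in the completion callback. A self-contained sketch of that completion pattern follows; struct req, MAY_BACKLOG and the helpers are illustrative stand-ins, not the crypto API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAY_BACKLOG	0x1	/* stand-in for CRYPTO_TFM_REQ_MAY_BACKLOG */

struct req {
	unsigned int flags;
	void *priv;		/* saved original state, freed once the op completes */
};

/* Illustrative stand-in for ahash_restore_req(): undo the adjustment. */
static void restore_req(struct req *r, int err)
{
	free(r->priv);
	r->priv = NULL;
	printf("restored (err=%d)\n", err);
}

/* Illustrative stand-in for the op wrappers patched above. */
static int run_op(struct req *r, int (*op)(struct req *))
{
	int err = op(r);

	/* Still in flight: keep the adjusted request alive for the callback. */
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (r->flags & MAY_BACKLOG)))
		return err;

	restore_req(r, err);
	return err;
}

static int fake_sync_op(struct req *r)  { (void)r; return 0; }
static int fake_async_op(struct req *r) { (void)r; return -EINPROGRESS; }

int main(void)
{
	struct req a = { .flags = MAY_BACKLOG, .priv = malloc(16) };
	struct req b = { .flags = MAY_BACKLOG, .priv = malloc(16) };

	run_op(&a, fake_sync_op);	/* completes inline: restored here */
	run_op(&b, fake_async_op);	/* async: the callback restores it later */
	restore_req(&b, 0);		/* stand-in for that async completion */
	return 0;
}
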
 
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index e8817e2..fde8d88 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -39,6 +39,7 @@
 	struct aead_async_rsgl first_rsgl;
 	struct list_head list;
 	struct kiocb *iocb;
+	struct sock *sk;
 	unsigned int tsgls;
 	char iv[];
 };
@@ -379,12 +380,10 @@
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-	struct sock *sk = _req->data;
-	struct alg_sock *ask = alg_sk(sk);
-	struct aead_ctx *ctx = ask->private;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-	struct aead_request *req = aead_request_cast(_req);
+	struct aead_request *req = _req->data;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+	struct sock *sk = areq->sk;
 	struct scatterlist *sg = areq->tsgl;
 	struct aead_async_rsgl *rsgl;
 	struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@
 	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
 	INIT_LIST_HEAD(&areq->list);
 	areq->iocb = msg->msg_iocb;
+	areq->sk = sk;
 	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
 	aead_request_set_tfm(req, tfm);
 	aead_request_set_ad(req, ctx->aead_assoclen);
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  aead_async_cb, sk);
+				  aead_async_cb, req);
 	used -= ctx->aead_assoclen;
 
 	/* take over all tx sgls from ctx */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 48e19d0..22ca892 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -729,12 +729,12 @@
 
 static int ec_guard(struct acpi_ec *ec)
 {
-	unsigned long guard = usecs_to_jiffies(ec_polling_guard);
+	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
 	unsigned long timeout = ec->timestamp + guard;
 
 	/* Ensure guarding period before polling EC status */
 	do {
-		if (ec_busy_polling) {
+		if (ec->busy_polling) {
 			/* Perform busy polling */
 			if (ec_transaction_completed(ec))
 				return 0;
@@ -998,6 +998,28 @@
 	spin_unlock_irqrestore(&ec->lock, flags);
 }
 
+static void acpi_ec_enter_noirq(struct acpi_ec *ec)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ec->lock, flags);
+	ec->busy_polling = true;
+	ec->polling_guard = 0;
+	ec_log_drv("interrupt blocked");
+	spin_unlock_irqrestore(&ec->lock, flags);
+}
+
+static void acpi_ec_leave_noirq(struct acpi_ec *ec)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ec->lock, flags);
+	ec->busy_polling = ec_busy_polling;
+	ec->polling_guard = ec_polling_guard;
+	ec_log_drv("interrupt unblocked");
+	spin_unlock_irqrestore(&ec->lock, flags);
+}
+
 void acpi_ec_block_transactions(void)
 {
 	struct acpi_ec *ec = first_ec;
@@ -1278,7 +1300,7 @@
 	if (function != ACPI_READ && function != ACPI_WRITE)
 		return AE_BAD_PARAMETER;
 
-	if (ec_busy_polling || bits > 8)
+	if (ec->busy_polling || bits > 8)
 		acpi_ec_burst_enable(ec);
 
 	for (i = 0; i < bytes; ++i, ++address, ++value)
@@ -1286,7 +1308,7 @@
 			acpi_ec_read(ec, address, value) :
 			acpi_ec_write(ec, address, *value);
 
-	if (ec_busy_polling || bits > 8)
+	if (ec->busy_polling || bits > 8)
 		acpi_ec_burst_disable(ec);
 
 	switch (result) {
@@ -1329,6 +1351,8 @@
 	spin_lock_init(&ec->lock);
 	INIT_WORK(&ec->work, acpi_ec_event_handler);
 	ec->timestamp = jiffies;
+	ec->busy_polling = true;
+	ec->polling_guard = 0;
 	return ec;
 }
 
@@ -1390,6 +1414,7 @@
 	acpi_ec_start(ec, false);
 
 	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+		acpi_ec_enter_noirq(ec);
 		status = acpi_install_address_space_handler(ec->handle,
 							    ACPI_ADR_SPACE_EC,
 							    &acpi_ec_space_handler,
@@ -1429,6 +1454,7 @@
 		/* This is not fatal as we can poll EC events */
 		if (ACPI_SUCCESS(status)) {
 			set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+			acpi_ec_leave_noirq(ec);
 			if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
 			    ec->reference_count >= 1)
 				acpi_ec_enable_gpe(ec, true);
@@ -1839,34 +1865,6 @@
 }
 
 #ifdef CONFIG_PM_SLEEP
-static void acpi_ec_enter_noirq(struct acpi_ec *ec)
-{
-	unsigned long flags;
-
-	if (ec == first_ec) {
-		spin_lock_irqsave(&ec->lock, flags);
-		ec->saved_busy_polling = ec_busy_polling;
-		ec->saved_polling_guard = ec_polling_guard;
-		ec_busy_polling = true;
-		ec_polling_guard = 0;
-		ec_log_drv("interrupt blocked");
-		spin_unlock_irqrestore(&ec->lock, flags);
-	}
-}
-
-static void acpi_ec_leave_noirq(struct acpi_ec *ec)
-{
-	unsigned long flags;
-
-	if (ec == first_ec) {
-		spin_lock_irqsave(&ec->lock, flags);
-		ec_busy_polling = ec->saved_busy_polling;
-		ec_polling_guard = ec->saved_polling_guard;
-		ec_log_drv("interrupt unblocked");
-		spin_unlock_irqrestore(&ec->lock, flags);
-	}
-}
-
 static int acpi_ec_suspend_noirq(struct device *dev)
 {
 	struct acpi_ec *ec =
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 0c45226..219b90b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -172,8 +172,8 @@
 	struct work_struct work;
 	unsigned long timestamp;
 	unsigned long nr_pending_queries;
-	bool saved_busy_polling;
-	unsigned int saved_polling_guard;
+	bool busy_polling;
+	unsigned int polling_guard;
 };
 
 extern struct acpi_ec *first_ec;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index d1664df..9ef3941 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1617,7 +1617,11 @@
 	const struct nfit_set_info_map *map0 = m0;
 	const struct nfit_set_info_map *map1 = m1;
 
-	return map0->region_offset - map1->region_offset;
+	if (map0->region_offset < map1->region_offset)
+		return -1;
+	else if (map0->region_offset > map1->region_offset)
+		return 1;
+	return 0;
 }
 
 /* Retrieve the nth entry referencing this spa */
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5a2fdf1..dd3786a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1827,15 +1827,20 @@
 		return;
 
 	device->flags.match_driver = true;
-	if (!ret) {
-		ret = device_attach(&device->dev);
-		if (ret < 0)
-			return;
-
-		if (!ret && device->pnp.type.platform_id)
-			acpi_default_enumeration(device);
+	if (ret > 0) {
+		acpi_device_set_enumerated(device);
+		goto ok;
 	}
 
+	ret = device_attach(&device->dev);
+	if (ret < 0)
+		return;
+
+	if (ret > 0 || !device->pnp.type.platform_id)
+		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);
+
  ok:
 	list_for_each_entry(child, &device->children, node)
 		acpi_bus_attach(child);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7a10487..c9441f9 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -54,7 +54,7 @@
 
 	struct mutex tx_lock;
 	struct gendisk *disk;
-	int blksize;
+	loff_t blksize;
 	loff_t bytesize;
 
 	/* protects initialization and shutdown of the socket */
@@ -126,7 +126,7 @@
 }
 
 static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
-			int blocksize, int nr_blocks)
+			loff_t blocksize, loff_t nr_blocks)
 {
 	int ret;
 
@@ -135,7 +135,7 @@
 		return ret;
 
 	nbd->blksize = blocksize;
-	nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
+	nbd->bytesize = blocksize * nr_blocks;
 
 	nbd_size_update(nbd, bdev);
 
@@ -648,7 +648,7 @@
 
 	case NBD_SET_SIZE:
 		return nbd_size_set(nbd, bdev, nbd->blksize,
-				    arg / nbd->blksize);
+					div_s64(arg, nbd->blksize));
 
 	case NBD_SET_SIZE_BLOCKS:
 		return nbd_size_set(nbd, bdev, nbd->blksize, arg);
@@ -817,7 +817,7 @@
 	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
 	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
 	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
-	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
 	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
 
 	return 0;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d2ef51c..c9914d65 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -582,13 +582,13 @@
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
 		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-		clear_page(mem);
+		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
-		copy_page(mem, cmem);
+		memcpy(mem, cmem, PAGE_SIZE);
 	} else {
 		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
@@ -780,7 +780,7 @@
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
 		src = kmap_atomic(page);
-		copy_page(cmem, src);
+		memcpy(cmem, src, PAGE_SIZE);
 		kunmap_atomic(src);
 	} else {
 		memcpy(cmem, src, clen);
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 9c26f87..e907d0d 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -773,6 +773,7 @@
 	if (*buf != 0x80) {
 		list_del(&entry->track);
 		kfree(entry);
+		entry = NULL;
 		return 1;
 	}
 
@@ -790,6 +791,7 @@
 	if (delayed_rsp_id == 0) {
 		list_del(&entry->track);
 		kfree(entry);
+		entry = NULL;
 		return 1;
 	}
 
@@ -803,6 +805,7 @@
 	if (rsp_count > 0 && rsp_count < 0x1000) {
 		list_del(&entry->track);
 		kfree(entry);
+		entry = NULL;
 		return 1;
 	}
 
@@ -1447,6 +1450,7 @@
 		dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
 
 	/* Notify the DCI process that the peripheral DCI Channel is up */
+	mutex_lock(&driver->dci_mutex);
 	list_for_each_safe(start, temp, &driver->dci_client_list) {
 		entry = list_entry(start, struct diag_dci_client_tbl, track);
 		if (entry->client_info.token != proc)
@@ -1469,6 +1473,7 @@
 						info.si_int, stat);
 		}
 	}
+	mutex_unlock(&driver->dci_mutex);
 }
 
 static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
@@ -1942,6 +1947,7 @@
 	reg_entry.cmd_code_hi = header->subsys_cmd_code;
 	reg_entry.cmd_code_lo = header->subsys_cmd_code;
 
+	mutex_lock(&driver->cmd_reg_mutex);
 	temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
 	if (temp_entry) {
 		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
@@ -1953,6 +1959,7 @@
 				reg_entry.cmd_code, reg_entry.subsys_id,
 				reg_entry.cmd_code_hi);
 	}
+	mutex_unlock(&driver->cmd_reg_mutex);
 
 	return ret;
 }
@@ -2684,10 +2691,12 @@
 err:
 	pr_err("diag: Could not initialize diag DCI buffers");
 	kfree(driver->apps_dci_buf);
+	driver->apps_dci_buf = NULL;
 
 	if (driver->diag_dci_wq)
 		destroy_workqueue(driver->diag_dci_wq);
 	kfree(partial_pkt.data);
+	partial_pkt.data = NULL;
 	mutex_destroy(&driver->dci_mutex);
 	mutex_destroy(&dci_log_mask_mutex);
 	mutex_destroy(&dci_event_mask_mutex);
@@ -2707,7 +2716,9 @@
 void diag_dci_exit(void)
 {
 	kfree(partial_pkt.data);
+	partial_pkt.data = NULL;
 	kfree(driver->apps_dci_buf);
+	driver->apps_dci_buf = NULL;
 	mutex_destroy(&driver->dci_mutex);
 	mutex_destroy(&dci_log_mask_mutex);
 	mutex_destroy(&dci_event_mask_mutex);
@@ -2914,22 +2925,30 @@
 				mutex_destroy(&proc_buf->health_mutex);
 				if (proc_buf->buf_primary) {
 					kfree(proc_buf->buf_primary->data);
+					proc_buf->buf_primary->data = NULL;
 					mutex_destroy(
 					   &proc_buf->buf_primary->data_mutex);
 				}
 				kfree(proc_buf->buf_primary);
+				proc_buf->buf_primary = NULL;
 				if (proc_buf->buf_cmd) {
 					kfree(proc_buf->buf_cmd->data);
+					proc_buf->buf_cmd->data = NULL;
 					mutex_destroy(
 					   &proc_buf->buf_cmd->data_mutex);
 				}
 				kfree(proc_buf->buf_cmd);
+				proc_buf->buf_cmd = NULL;
 			}
 		}
 		kfree(new_entry->dci_event_mask);
+		new_entry->dci_event_mask = NULL;
 		kfree(new_entry->dci_log_mask);
+		new_entry->dci_log_mask = NULL;
 		kfree(new_entry->buffers);
+		new_entry->buffers = NULL;
 		kfree(new_entry);
+		new_entry = NULL;
 	}
 	mutex_unlock(&driver->dci_mutex);
 	return DIAG_DCI_NO_REG;
@@ -2960,6 +2979,7 @@
 	 * masks and send the masks to peripherals
 	 */
 	kfree(entry->dci_log_mask);
+	entry->dci_log_mask = NULL;
 	diag_dci_invalidate_cumulative_log_mask(token);
 	if (token == DCI_LOCAL_PROC)
 		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
@@ -2967,6 +2987,7 @@
 	if (ret != DIAG_DCI_NO_ERROR)
 		return ret;
 	kfree(entry->dci_event_mask);
+	entry->dci_event_mask = NULL;
 	diag_dci_invalidate_cumulative_event_mask(token);
 	if (token == DCI_LOCAL_PROC)
 		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
@@ -2981,6 +3002,7 @@
 			if (!list_empty(&req_entry->track))
 				list_del(&req_entry->track);
 			kfree(req_entry);
+			req_entry = NULL;
 		}
 	}
 
@@ -2996,6 +3018,7 @@
 			buf_entry->data = NULL;
 			mutex_unlock(&buf_entry->data_mutex);
 			kfree(buf_entry);
+			buf_entry = NULL;
 		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
 			peripheral = buf_entry->data_source;
 			if (peripheral == APPS_DATA)
@@ -3022,14 +3045,17 @@
 			mutex_unlock(&buf_entry->data_mutex);
 			mutex_destroy(&buf_entry->data_mutex);
 			kfree(buf_entry);
+			buf_entry = NULL;
 		}
 
 		mutex_lock(&proc_buf->buf_primary->data_mutex);
 		kfree(proc_buf->buf_primary->data);
+		proc_buf->buf_primary->data = NULL;
 		mutex_unlock(&proc_buf->buf_primary->data_mutex);
 
 		mutex_lock(&proc_buf->buf_cmd->data_mutex);
 		kfree(proc_buf->buf_cmd->data);
+		proc_buf->buf_cmd->data = NULL;
 		mutex_unlock(&proc_buf->buf_cmd->data_mutex);
 
 		mutex_destroy(&proc_buf->health_mutex);
@@ -3037,13 +3063,17 @@
 		mutex_destroy(&proc_buf->buf_cmd->data_mutex);
 
 		kfree(proc_buf->buf_primary);
+		proc_buf->buf_primary = NULL;
 		kfree(proc_buf->buf_cmd);
+		proc_buf->buf_cmd = NULL;
 		mutex_unlock(&proc_buf->buf_mutex);
 	}
 	mutex_destroy(&entry->write_buf_mutex);
 
 	kfree(entry->buffers);
+	entry->buffers = NULL;
 	kfree(entry);
+	entry = NULL;
 
 	if (driver->num_dci_client == 0) {
 		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 558e362..13ad402 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -354,8 +354,8 @@
 			ch->tbl[j].buf = NULL;
 			ch->tbl[j].len = 0;
 			ch->tbl[j].ctx = 0;
-			spin_lock_init(&(ch->lock));
 		}
+		spin_lock_init(&(ch->lock));
 	}
 
 	return 0;
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index ac8a6d0..1cf7f52 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/sched.h>
 #include <linux/ratelimit.h>
@@ -218,7 +219,8 @@
 	if (!ch)
 		return;
 
-	if (!atomic_read(&ch->connected) && driver->usb_connected)
+	if (!atomic_read(&ch->connected) &&
+		driver->usb_connected && diag_mask_param())
 		diag_clear_masks(NULL);
 
 	if (ch && ch->ops && ch->ops->close)
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index ea380fb..d3dde50 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -505,6 +505,7 @@
 	int ref_count;
 	int mask_clear;
 	struct mutex diag_maskclear_mutex;
+	struct mutex diag_notifier_mutex;
 	struct mutex diagchar_mutex;
 	struct mutex diag_file_mutex;
 	wait_queue_head_t wait_q;
@@ -547,7 +548,7 @@
 	struct mutex diag_id_mutex;
 	struct mutex cmd_reg_mutex;
 	uint32_t cmd_reg_count;
-	struct mutex diagfwd_channel_mutex;
+	struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
 	/* Sizes that reflect memory pool sizes */
 	unsigned int poolsize;
 	unsigned int poolsize_hdlc;
@@ -666,6 +667,7 @@
 void diag_cmd_remove_reg_by_pid(int pid);
 void diag_cmd_remove_reg_by_proc(int proc);
 int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
+int diag_mask_param(void);
 void diag_clear_masks(struct diag_md_session_t *info);
 
 void diag_record_stats(int type, int flag);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index c44a9ea..128d6ce 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -143,6 +143,14 @@
 static struct timer_list drain_timer;
 static int timer_in_progress;
 
+/*
+ * Diag mask clear variable:
+ * used for clearing the diag masks upon USB
+ * disconnection and when ODL is stopped.
+ */
+static int diag_mask_clear_param = 1;
+module_param(diag_mask_clear_param, int, 0644);
+
 struct diag_apps_data_t {
 	void *buf;
 	uint32_t len;
@@ -388,7 +396,10 @@
 
 	return ret;
 }
-
+int diag_mask_param(void)
+{
+	return diag_mask_clear_param;
+}
 void diag_clear_masks(struct diag_md_session_t *info)
 {
 	int ret;
@@ -421,14 +432,17 @@
 	if (!session_info)
 		return;
 
-	diag_clear_masks(session_info);
+	if (diag_mask_clear_param)
+		diag_clear_masks(session_info);
 
 	mutex_lock(&driver->diag_maskclear_mutex);
 	driver->mask_clear = 1;
 	mutex_unlock(&driver->diag_maskclear_mutex);
 
+	mutex_lock(&driver->diagchar_mutex);
 	session_peripheral_mask = session_info->peripheral_mask;
 	diag_md_session_close(session_info);
+	mutex_unlock(&driver->diagchar_mutex);
 	for (i = 0; i < NUM_MD_SESSIONS; i++)
 		if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
 			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
@@ -701,7 +715,7 @@
 
 	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
 		item = list_entry(start, struct diag_cmd_reg_t, link);
-		if (item == NULL || &item->entry == NULL) {
+		if (&item->entry == NULL) {
 			pr_err("diag: In %s, unable to search command\n",
 			       __func__);
 			return NULL;
@@ -3402,7 +3416,7 @@
 static int __init diagchar_init(void)
 {
 	dev_t dev;
-	int ret;
+	int ret, i;
 
 	pr_debug("diagfwd initializing ..\n");
 	ret = 0;
@@ -3445,10 +3459,12 @@
 	mutex_init(&driver->hdlc_disable_mutex);
 	mutex_init(&driver->diagchar_mutex);
 	mutex_init(&driver->diag_maskclear_mutex);
+	mutex_init(&driver->diag_notifier_mutex);
 	mutex_init(&driver->diag_file_mutex);
 	mutex_init(&driver->delayed_rsp_mutex);
 	mutex_init(&apps_data_mutex);
-	mutex_init(&driver->diagfwd_channel_mutex);
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		mutex_init(&driver->diagfwd_channel_mutex[i]);
 	init_waitqueue_head(&driver->wait_q);
 	INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
 	INIT_WORK(&(driver->update_user_clients),
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index f9dc670d..cd49f00 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1303,6 +1303,8 @@
 
 static int diagfwd_mux_close(int id, int mode)
 {
+	uint8_t i;
+
 	switch (mode) {
 	case DIAG_USB_MODE:
 		driver->usb_connected = 0;
@@ -1323,10 +1325,23 @@
 		 */
 	} else {
 		/*
-		 * With clearing of masks on ODL exit and
-		 * USB disconnection, closing of the channel is
-		 * not needed.This enables read and drop of stale packets.
+		 * When the mask-clear module parameter is set,
+		 * peripheral masks are cleared on ODL exit and
+		 * USB disconnection and buffers are not marked busy.
+		 * This enables reading and dropping of stale packets.
+		 *
+		 * When the mask-clear module parameter is cleared,
+		 * masks are left intact and buffers are marked busy
+		 * to ensure traffic generated by the peripherals
+		 * is not read.
 		 */
+		if (!(diag_mask_param())) {
+			for (i = 0; i < NUM_PERIPHERALS; i++) {
+				diagfwd_close(i, TYPE_DATA);
+				diagfwd_close(i, TYPE_CMD);
+			}
+		}
+		/* Re-enable HDLC encoding */
 		pr_debug("diag: In %s, re-enabling HDLC encoding\n",
 		       __func__);
 		mutex_lock(&driver->hdlc_disable_mutex);
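
The new comment in diagfwd_mux_close() above describes two close-time behaviours selected by the diag_mask_clear_param module parameter added in diagchar_core.c. A minimal standalone sketch of that branch follows; diag_mask_param() and diagfwd_close() are hypothetical stubs standing in for the driver functions.

#include <stdio.h>

#define NUM_PERIPHERALS	8
enum { TYPE_DATA, TYPE_CMD };

/* Hypothetical stand-in for the module parameter accessor. */
static int diag_mask_param(void) { return 0; }

/* Hypothetical stand-in: closes the channel, marking its buffers busy. */
static void diagfwd_close(int peripheral, int type)
{
	printf("close peripheral %d type %d\n", peripheral, type);
}

/* Mirrors the else-branch added to diagfwd_mux_close() above. */
static void mux_close_non_memory_device(void)
{
	int i;

	if (!diag_mask_param()) {
		/* Masks are kept: stop reading stale peripheral traffic. */
		for (i = 0; i < NUM_PERIPHERALS; i++) {
			diagfwd_close(i, TYPE_DATA);
			diagfwd_close(i, TYPE_CMD);
		}
	}
	/* Otherwise masks were cleared; stale packets are read and dropped. */
}

int main(void)
{
	mux_close_non_memory_device();
	return 0;
}
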
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index b262897..e13871e 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -359,6 +359,8 @@
 		feature_mask_len = FEATURE_MASK_LEN;
 	}
 
+	diag_cmd_remove_reg_by_proc(peripheral);
+
 	driver->feature[peripheral].rcvd_feature_mask = 1;
 
 	for (i = 0; i < feature_mask_len && read_len < len; i++) {
@@ -660,7 +662,7 @@
 	if (!new_item)
 		return -ENOMEM;
 	kmemleak_not_leak(new_item);
-	new_item->process_name = kzalloc(strlen(process_name), GFP_KERNEL);
+	new_item->process_name = kzalloc(strlen(process_name) + 1, GFP_KERNEL);
 	if (!new_item->process_name) {
 		kfree(new_item);
 		new_item = NULL;
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 6685be3..5a8ef04 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -436,9 +436,9 @@
 			fwd_info->inited = 1;
 			fwd_info->read_bytes = 0;
 			fwd_info->write_bytes = 0;
-			spin_lock_init(&fwd_info->buf_lock);
-			spin_lock_init(&fwd_info->write_buf_lock);
+			mutex_init(&fwd_info->buf_mutex);
 			mutex_init(&fwd_info->data_mutex);
+			spin_lock_init(&fwd_info->write_buf_lock);
 		}
 	}
 
@@ -452,8 +452,8 @@
 			fwd_info->ch_open = 0;
 			fwd_info->read_bytes = 0;
 			fwd_info->write_bytes = 0;
-			spin_lock_init(&fwd_info->buf_lock);
 			spin_lock_init(&fwd_info->write_buf_lock);
+			mutex_init(&fwd_info->buf_mutex);
 			mutex_init(&fwd_info->data_mutex);
 			/*
 			 * This state shouldn't be set for Control channels
@@ -646,7 +646,7 @@
 
 	}
 
-	mutex_lock(&driver->diagfwd_channel_mutex);
+	mutex_lock(&driver->diagfwd_channel_mutex[peripheral]);
 	fwd_info = &early_init_info[transport][peripheral];
 	if (fwd_info->p_ops && fwd_info->p_ops->close)
 		fwd_info->p_ops->close(fwd_info->ctxt);
@@ -670,7 +670,7 @@
 		diagfwd_late_open(dest_info);
 	diagfwd_cntl_open(dest_info);
 	init_fn(peripheral);
-	mutex_unlock(&driver->diagfwd_channel_mutex);
+	mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
 	diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
 	diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
 }
@@ -1065,7 +1065,6 @@
 
 void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
 {
-	unsigned long flags;
 
 	if (!fwd_info)
 		return;
@@ -1076,10 +1075,10 @@
 		return;
 	}
 
-	spin_lock_irqsave(&fwd_info->buf_lock, flags);
+	mutex_lock(&fwd_info->buf_mutex);
 	if (!fwd_info->buf_1) {
 		fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
-					  GFP_ATOMIC);
+					  GFP_KERNEL);
 		if (!fwd_info->buf_1)
 			goto err;
 		kmemleak_not_leak(fwd_info->buf_1);
@@ -1087,7 +1086,7 @@
 	if (!fwd_info->buf_1->data) {
 		fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
 					APF_DIAG_PADDING,
-					GFP_ATOMIC);
+					GFP_KERNEL);
 		if (!fwd_info->buf_1->data)
 			goto err;
 		fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
@@ -1099,7 +1098,7 @@
 	if (fwd_info->type == TYPE_DATA) {
 		if (!fwd_info->buf_2) {
 			fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
-					      GFP_ATOMIC);
+					      GFP_KERNEL);
 			if (!fwd_info->buf_2)
 				goto err;
 			kmemleak_not_leak(fwd_info->buf_2);
@@ -1108,7 +1107,7 @@
 		if (!fwd_info->buf_2->data) {
 			fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
 							APF_DIAG_PADDING,
-						    GFP_ATOMIC);
+						    GFP_KERNEL);
 			if (!fwd_info->buf_2->data)
 				goto err;
 			fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
@@ -1124,7 +1123,7 @@
 				fwd_info->buf_1->data_raw =
 					kzalloc(PERIPHERAL_BUF_SZ +
 						APF_DIAG_PADDING,
-						GFP_ATOMIC);
+						GFP_KERNEL);
 				if (!fwd_info->buf_1->data_raw)
 					goto err;
 				fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
@@ -1134,7 +1133,7 @@
 				fwd_info->buf_2->data_raw =
 					kzalloc(PERIPHERAL_BUF_SZ +
 						APF_DIAG_PADDING,
-						GFP_ATOMIC);
+						GFP_KERNEL);
 				if (!fwd_info->buf_2->data_raw)
 					goto err;
 				fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ;
@@ -1148,7 +1147,7 @@
 		if (!fwd_info->buf_1->data_raw) {
 			fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
 						APF_DIAG_PADDING,
-							GFP_ATOMIC);
+							GFP_KERNEL);
 			if (!fwd_info->buf_1->data_raw)
 				goto err;
 			fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
@@ -1156,22 +1155,21 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+	mutex_unlock(&fwd_info->buf_mutex);
 	return;
 
 err:
-	spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+	mutex_unlock(&fwd_info->buf_mutex);
 	diagfwd_buffers_exit(fwd_info);
 }
 
 static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
 {
-	unsigned long flags;
 
 	if (!fwd_info)
 		return;
 
-	spin_lock_irqsave(&fwd_info->buf_lock, flags);
+	mutex_lock(&fwd_info->buf_mutex);
 	if (fwd_info->buf_1) {
 		kfree(fwd_info->buf_1->data);
 		fwd_info->buf_1->data = NULL;
@@ -1188,7 +1186,7 @@
 		kfree(fwd_info->buf_2);
 		fwd_info->buf_2 = NULL;
 	}
-	spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+	mutex_unlock(&fwd_info->buf_mutex);
 }
 
 void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index b8deb38..5884a12 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -71,8 +71,8 @@
 	atomic_t opened;
 	unsigned long read_bytes;
 	unsigned long write_bytes;
-	spinlock_t buf_lock;
 	spinlock_t write_buf_lock;
+	struct mutex buf_mutex;
 	struct mutex data_mutex;
 	void *ctxt;
 	struct diagfwd_buf_t *buf_1;
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 6403abc..af8bf00 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -34,14 +34,17 @@
 #include "diagfwd_socket.h"
 #include "diag_ipc_logging.h"
 
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
 #define DIAG_SVC_ID		0x1001
 
 #define MODEM_INST_BASE		0
 #define LPASS_INST_BASE		64
 #define WCNSS_INST_BASE		128
 #define SENSORS_INST_BASE	192
-#define WDSP_INST_BASE	256
-#define CDSP_INST_BASE  320
+#define CDSP_INST_BASE	256
+#define WDSP_INST_BASE  320
 
 #define INST_ID_CNTL		0
 #define INST_ID_CMD		1
@@ -50,6 +53,7 @@
 #define INST_ID_DCI		4
 
 struct diag_cntl_socket_info *cntl_socket;
+static uint64_t bootup_req[NUM_SOCKET_SUBSYSTEMS];
 
 struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
 	{
@@ -287,13 +291,6 @@
 	spin_unlock_irqrestore(&info->lock, flags);
 	diag_ws_on_notify();
 
-	/*
-	 * Initialize read buffers for the servers. The servers must read data
-	 * first to get the address of its clients.
-	 */
-	if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
-		diagfwd_buffers_init(info->fwd_ctxt);
-
 	queue_work(info->wq, &(info->read_work));
 	wake_up_interruptible(&info->read_wait_q);
 }
@@ -422,7 +419,7 @@
 		return;
 	}
 	__socket_open_channel(info);
-	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened client\n", info->name);
 }
 
 static void socket_open_server(struct diag_socket_info *info)
@@ -498,6 +495,13 @@
 	if (!atomic_read(&info->opened))
 		return;
 
+	if (bootup_req[info->peripheral] == PEPIPHERAL_SSR_UP) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s is up, stopping cleanup: bootup_req = %d\n",
+		info->name, (int)bootup_req[info->peripheral]);
+		return;
+	}
+
 	memset(&info->remote_addr, 0, sizeof(struct sockaddr_msm_ipc));
 	diagfwd_channel_close(info->fwd_ctxt);
 
@@ -614,7 +618,9 @@
 	case CNTL_CMD_REMOVE_CLIENT:
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove client\n",
 			 info->name);
+		mutex_lock(&driver->diag_notifier_mutex);
 		socket_close_channel(info);
+		mutex_unlock(&driver->diag_notifier_mutex);
 		break;
 	default:
 		return -EINVAL;
@@ -623,6 +629,25 @@
 	return 0;
 }
 
+static int restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data);
+
+struct restart_notifier_block {
+	unsigned int processor;
+	char *name;
+	struct notifier_block nb;
+};
+
+static struct restart_notifier_block restart_notifiers[] = {
+	{SOCKET_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_ADSP, "adsp", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_SLPI, "slpi", .nb.notifier_call = restart_notifier_cb},
+	{SOCKET_CDSP, "cdsp", .nb.notifier_call = restart_notifier_cb},
+};
+
+
 static void cntl_socket_read_work_fn(struct work_struct *work)
 {
 	union cntl_port_msg msg;
@@ -630,7 +655,6 @@
 	struct kvec iov = { 0 };
 	struct msghdr read_msg = { 0 };
 
-
 	if (!cntl_socket)
 		return;
 
@@ -679,6 +703,9 @@
 	if (!info)
 		return;
 
+	if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
+		diagfwd_buffers_init(info->fwd_ctxt);
+
 	diagfwd_channel_read(info->fwd_ctxt);
 }
 
@@ -847,8 +874,11 @@
 int diag_socket_init(void)
 {
 	int err = 0;
+	int i;
 	int peripheral = 0;
+	void *handle;
 	struct diag_socket_info *info = NULL;
+	struct restart_notifier_block *nb;
 
 	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
 		info = &socket_cntl[peripheral];
@@ -869,6 +899,17 @@
 		goto fail;
 	}
 
+	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+		nb = &restart_notifiers[i];
+		if (nb) {
+			handle = subsys_notif_register_notifier(nb->name,
+				&nb->nb);
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s: registering notifier for '%s', handle=%p\n",
+			__func__, nb->name, handle);
+		}
+	}
+
 	register_ipcrtr_af_init_notifier(&socket_notify);
 fail:
 	return err;
@@ -904,6 +945,65 @@
 	return 0;
 }
 
+static int restart_notifier_cb(struct notifier_block *this, unsigned long code,
+	void *_cmd)
+{
+	struct restart_notifier_block *notifier;
+
+	notifier = container_of(this,
+			struct restart_notifier_block, nb);
+	if (!notifier) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: invalid notifier block\n", __func__);
+		return NOTIFY_DONE;
+	}
+
+	mutex_lock(&driver->diag_notifier_mutex);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"%s: ssr for processor %d ('%s')\n",
+	__func__, notifier->processor, notifier->name);
+
+	switch (code) {
+
+	case SUBSYS_BEFORE_SHUTDOWN:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
+		bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN;
+		break;
+
+	case SUBSYS_AFTER_SHUTDOWN:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: SUBSYS_AFTER_SHUTDOWN\n", __func__);
+		break;
+
+	case SUBSYS_BEFORE_POWERUP:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: SUBSYS_BEFORE_POWERUP\n", __func__);
+		break;
+
+	case SUBSYS_AFTER_POWERUP:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: %s: SUBSYS_AFTER_POWERUP\n", __func__);
+		if (!bootup_req[notifier->processor]) {
+			bootup_req[notifier->processor] = PEPIPHERAL_SSR_DOWN;
+			break;
+		}
+		bootup_req[notifier->processor] = PEPIPHERAL_SSR_UP;
+		break;
+
+	default:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: code: %lu\n", code);
+		break;
+	}
+	mutex_unlock(&driver->diag_notifier_mutex);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"diag: bootup_req[%s] = %d\n",
+	notifier->name, (int)bootup_req[notifier->processor]);
+
+	return NOTIFY_DONE;
+}
+
 int diag_socket_init_peripheral(uint8_t peripheral)
 {
 	struct diag_socket_info *info = NULL;
@@ -986,9 +1086,9 @@
 				      (info->data_ready > 0) || (!info->hdl) ||
 				      (atomic_read(&info->diag_state) == 0));
 	if (err) {
-		mutex_lock(&driver->diagfwd_channel_mutex);
+		mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
 		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
-		mutex_unlock(&driver->diagfwd_channel_mutex);
+		mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
 		return -ERESTARTSYS;
 	}
 
@@ -1000,9 +1100,9 @@
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 			 "%s closing read thread. diag state is closed\n",
 			 info->name);
-		mutex_lock(&driver->diagfwd_channel_mutex);
+		mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
 		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
-		mutex_unlock(&driver->diagfwd_channel_mutex);
+		mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
 		return 0;
 	}
 
@@ -1069,10 +1169,10 @@
 	if (total_recd > 0) {
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
 			 info->name, total_recd);
-		mutex_lock(&driver->diagfwd_channel_mutex);
+		mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
 		err = diagfwd_channel_read_done(info->fwd_ctxt,
 						buf, total_recd);
-		mutex_unlock(&driver->diagfwd_channel_mutex);
+		mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
 		if (err)
 			goto fail;
 	} else {
@@ -1085,9 +1185,9 @@
 	return 0;
 
 fail:
-	mutex_lock(&driver->diagfwd_channel_mutex);
+	mutex_lock(&driver->diagfwd_channel_mutex[info->peripheral]);
 	diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
-	mutex_unlock(&driver->diagfwd_channel_mutex);
+	mutex_unlock(&driver->diagfwd_channel_mutex[info->peripheral]);
 	return -EIO;
 }
 
diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h
index a2b922a..a9487b1 100644
--- a/drivers/char/diag/diagfwd_socket.h
+++ b/drivers/char/diag/diagfwd_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,10 +24,24 @@
 #define PORT_TYPE_SERVER		0
 #define PORT_TYPE_CLIENT		1
 
+#define PEPIPHERAL_AFTER_BOOT		0
+#define PEPIPHERAL_SSR_DOWN		1
+#define PEPIPHERAL_SSR_UP		2
+
 #define CNTL_CMD_NEW_SERVER		4
 #define CNTL_CMD_REMOVE_SERVER		5
 #define CNTL_CMD_REMOVE_CLIENT		6
 
+enum {
+	SOCKET_MODEM,
+	SOCKET_ADSP,
+	SOCKET_WCNSS,
+	SOCKET_SLPI,
+	SOCKET_CDSP,
+	SOCKET_APPS,
+	NUM_SOCKET_SUBSYSTEMS,
+};
+
 struct diag_socket_info {
 	uint8_t peripheral;
 	uint8_t type;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6d9cc2d..7e4a9d1 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -60,6 +60,10 @@
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@
 	return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -122,23 +130,31 @@
 
 	while (count > 0) {
 		unsigned long remaining;
+		int allowed;
 
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
+		if (allowed == 2) {
+			/* Show zeros for restricted memory. */
+			remaining = clear_user(buf, sz);
+		} else {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr)
+				return -EFAULT;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr)
-			return -EFAULT;
+			remaining = copy_to_user(buf, ptr, sz);
 
-		remaining = copy_to_user(buf, ptr, sz);
-		unxlate_dev_mem_ptr(p, ptr);
+			unxlate_dev_mem_ptr(p, ptr);
+		}
+
 		if (remaining)
 			return -EFAULT;
 
@@ -181,30 +197,36 @@
 #endif
 
 	while (count > 0) {
+		int allowed;
+
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr) {
-			if (written)
-				break;
-			return -EFAULT;
-		}
+		/* Skip actual writing when a page is marked as restricted. */
+		if (allowed == 1) {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr) {
+				if (written)
+					break;
+				return -EFAULT;
+			}
 
-		copied = copy_from_user(ptr, buf, sz);
-		unxlate_dev_mem_ptr(p, ptr);
-		if (copied) {
-			written += sz - copied;
-			if (written)
-				break;
-			return -EFAULT;
+			copied = copy_from_user(ptr, buf, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (copied) {
+				written += sz - copied;
+				if (written)
+					break;
+				return -EFAULT;
+			}
 		}
 
 		buf += sz;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5649234..471a301 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1136,6 +1136,8 @@
 {
 	struct port *port;
 	struct scatterlist sg[1];
+	void *data;
+	int ret;
 
 	if (unlikely(early_put_chars))
 		return early_put_chars(vtermno, buf, count);
@@ -1144,8 +1146,14 @@
 	if (!port)
 		return -EPIPE;
 
-	sg_init_one(sg, buf, count);
-	return __send_to_port(port, sg, 1, count, (void *)buf, false);
+	data = kmemdup(buf, count, GFP_ATOMIC);
+	if (!data)
+		return -ENOMEM;
+
+	sg_init_one(sg, data, count);
+	ret = __send_to_port(port, sg, 1, count, data, false);
+	kfree(data);
+	return ret;
 }
 
 /*
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ece2f00..b248b1b 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -71,6 +71,8 @@
 	bool			orphan;
 	unsigned int		enable_count;
 	unsigned int		prepare_count;
+	bool			need_handoff_enable;
+	bool			need_handoff_prepare;
 	unsigned long		min_rate;
 	unsigned long		max_rate;
 	unsigned long		accuracy;
@@ -997,6 +999,19 @@
 	hlist_for_each_entry(child, &core->children, child_node)
 		clk_unprepare_unused_subtree(child);
 
+	/*
+	 * Setting the CLK_ENABLE_HAND_OFF flag triggers this conditional.
+	 *
+	 * need_handoff_prepare implies this clk was already prepared by
+	 * __clk_init. Now we have a proper user, so unset the flag in our
+	 * internal bookkeeping. See CLK_ENABLE_HAND_OFF flag in clk-provider.h
+	 * for details.
+	 */
+	if (core->need_handoff_prepare) {
+		core->need_handoff_prepare = false;
+		clk_core_unprepare(core);
+	}
+
 	if (core->prepare_count)
 		return;
 
@@ -1023,6 +1038,21 @@
 	hlist_for_each_entry(child, &core->children, child_node)
 		clk_disable_unused_subtree(child);
 
+	/*
+	 * Setting the CLK_ENABLE_HAND_OFF flag triggers this conditional.
+	 *
+	 * need_handoff_enable implies this clk was already enabled by
+	 * __clk_init. Now we have a proper user, so unset the flag in our
+	 * internal bookkeeping. See CLK_ENABLE_HAND_OFF flag in clk-provider.h
+	 * for details.
+	 */
+	if (core->need_handoff_enable) {
+		core->need_handoff_enable = false;
+		flags = clk_enable_lock();
+		clk_core_disable(core);
+		clk_enable_unlock(flags);
+	}
+
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
 
@@ -3140,6 +3170,37 @@
 		clk_enable_unlock(flags);
 	}
 
+	/*
+	 * enable clocks with the CLK_ENABLE_HAND_OFF flag set
+	 *
+	 * This flag causes the framework to enable the clock at registration
+	 * time, which is sometimes necessary for clocks that would cause a
+	 * system crash when gated (e.g. cpu, memory, etc). The prepare_count
+	 * is migrated over to the first clk consumer to call clk_prepare().
+	 * Similarly the clk's enable_count is migrated to the first consumer
+	 * to call clk_enable().
+	 */
+	if (core->flags & CLK_ENABLE_HAND_OFF) {
+		unsigned long flags;
+
+		/*
+		 * A few clocks have hardware gating that must be ON before
+		 * the clocks can be prepared/enabled. So check whether the
+		 * clock was already turned ON earlier and, if so,
+		 * prepare/enable it here.
+		 */
+		if (clk_core_is_enabled(core)) {
+			core->need_handoff_prepare = true;
+			core->need_handoff_enable = true;
+			ret = clk_core_prepare(core);
+			if (ret)
+				goto out;
+			flags = clk_enable_lock();
+			clk_core_enable(core);
+			clk_enable_unlock(flags);
+		}
+	}
+
 	kref_init(&core->ref);
 out:
 	clk_prepare_unlock();
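
The CLK_ENABLE_HAND_OFF handling above takes one extra prepare/enable reference at registration for clocks found already running, and drops that reference again in the late-init unused-clock sweep. A compact standalone sketch of the bookkeeping follows; struct fake_clk and its helpers are simplified stand-ins, not the common clock framework API.

#include <stdio.h>
#include <stdbool.h>

struct fake_clk {
	unsigned int prepare_count;
	unsigned int enable_count;
	bool need_handoff_prepare;
	bool need_handoff_enable;
};

/* At registration: hold one reference so the clock is never gated early. */
static void register_handoff(struct fake_clk *c)
{
	c->need_handoff_prepare = true;
	c->need_handoff_enable = true;
	c->prepare_count++;
	c->enable_count++;
}

/* In the disable/unprepare-unused sweep: drop the hand-off reference. */
static void late_init_sweep(struct fake_clk *c)
{
	if (c->need_handoff_enable) {
		c->need_handoff_enable = false;
		c->enable_count--;
	}
	if (c->need_handoff_prepare) {
		c->need_handoff_prepare = false;
		c->prepare_count--;
	}
}

int main(void)
{
	struct fake_clk c = { 0 };

	register_handoff(&c);
	/* A real consumer takes its own reference before the sweep runs. */
	c.prepare_count++;
	c.enable_count++;
	late_init_sweep(&c);
	printf("prepare=%u enable=%u\n", c.prepare_count, c.enable_count);
	return 0;
}
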
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 7226dd3..46a3d27 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -225,4 +225,13 @@
 	  sdm845 devices.
 	  Say Y if you want to support graphics controller devices.
 
+config MSM_CLK_AOP_QMP
+	tristate "AOP QMP Clock Driver"
+	depends on COMMON_CLK_QCOM && MSM_QMP
+	help
+	  The Always On Processor manages a few shared clocks on some Qualcomm
+	  Technologies, Inc. SoCs. It accepts requests from other hardware
+	  subsystems via QMP mailboxes. Say Y to support the clocks managed
+	  by AOP on platforms such as sdm845.
+
 source "drivers/clk/qcom/mdss/Kconfig"
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 1d042cd..930e281 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -24,6 +24,7 @@
 obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
 obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
 obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
+obj-$(CONFIG_MSM_CLK_AOP_QMP) += clk-aop-qmp.o
 obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
 obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
diff --git a/drivers/clk/qcom/clk-aop-qmp.c b/drivers/clk/qcom/clk-aop-qmp.c
new file mode 100644
index 0000000..f698a55
--- /dev/null
+++ b/drivers/clk/qcom/clk-aop-qmp.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mailbox_client.h>
+#include <dt-bindings/clock/qcom,aop-qmp.h>
+
+#define MAX_LEN			        96
+#define MBOX_TOUT_MS			1000
+
+struct qmp_pkt {
+	u32 size;
+	void *data;
+};
+
+#define DEFINE_CLK_AOP_QMP(_name, _class, _res, _estate, _dstate)	\
+	static struct clk_aop_qmp _name = {				\
+		.msg.class = #_class,					\
+		.msg.res = #_res,					\
+		.enable_state = _estate,				\
+		.disable_state = _dstate,				\
+		.hw.init = &(struct clk_init_data){			\
+			.ops = &aop_qmp_clk_ops,			\
+			.name = #_name,					\
+			.num_parents = 0,				\
+			.flags = CLK_ENABLE_HAND_OFF,			\
+		},							\
+	}
+
+#define to_aop_qmp_clk(hw) container_of(hw, struct clk_aop_qmp, hw)
+
+/*
+ * struct qmp_mbox_msg - mailbox data to QMP
+ * @class:	identifies the class.
+ * @res:	identifies the resource in the class.
+ * @level:	identifies the level for the resource.
+ */
+struct qmp_mbox_msg {
+	char class[MAX_LEN];
+	char res[MAX_LEN];
+	int level;
+};
+
+/*
+ * struct clk_aop_qmp - AOP clock
+ * @dev:		The device that corresponds to this clock.
+ * @hw:			The clock hardware for this clock.
+ * @cl:			The client mailbox for this clock.
+ * @mbox:		The mbox controller for this clock.
+ * @level:		The clock level for this clock.
+ * @enable_state:	The clock state when this clock is prepared.
+ * @disable_state:	The clock state when this clock is unprepared.
+ * @msg:		QMP data associated with this clock.
+ * @enabled:		Status of the clock enable.
+ */
+struct clk_aop_qmp {
+	struct device *dev;
+	struct clk_hw hw;
+	struct mbox_client cl;
+	struct mbox_chan *mbox;
+	int level;
+	int enable_state;
+	int disable_state;
+	struct qmp_mbox_msg msg;
+	bool enabled;
+};
+
+static DEFINE_MUTEX(clk_aop_lock);
+
+static unsigned long clk_aop_qmp_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+
+	return clk->level;
+}
+
+static long clk_aop_qmp_round_rate(struct clk_hw *hw, unsigned long rate,
+						unsigned long *parent_rate)
+{
+	return rate;
+}
+
+static int clk_aop_qmp_set_rate(struct clk_hw *hw, unsigned long rate,
+						unsigned long parent_rate)
+{
+	char mbox_msg[MAX_LEN];
+	struct qmp_pkt pkt;
+	struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+	int ret = 0;
+
+	mutex_lock(&clk_aop_lock);
+
+	snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %ld}",
+					clk->msg.class, clk->msg.res, rate);
+	pkt.size = MAX_LEN;
+	pkt.data = mbox_msg;
+
+	ret = mbox_send_message(clk->mbox, &pkt);
+	if (ret < 0) {
+		pr_err("Failed to send set rate request of %lu for %s, ret %d\n",
+					rate, clk_hw_get_name(hw), ret);
+		goto err;
+	} else
+		/* success: mbox_send_message() returns a non-negative token */
+		ret = 0;
+
+	/* update the current clock level once the mailbox message is sent */
+	clk->level = rate;
+err:
+	mutex_unlock(&clk_aop_lock);
+
+	return ret;
+}
+
+static int clk_aop_qmp_prepare(struct clk_hw *hw)
+{
+	char mbox_msg[MAX_LEN];
+	unsigned long rate;
+	int ret = 0;
+	struct qmp_pkt pkt;
+	struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+
+	mutex_lock(&clk_aop_lock);
+
+	if (clk->level)
+		rate = clk->level;
+	else
+		rate = clk->enable_state;
+
+	snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %lu}",
+				clk->msg.class, clk->msg.res, rate);
+	pkt.size = MAX_LEN;
+	pkt.data = mbox_msg;
+
+	ret = mbox_send_message(clk->mbox, &pkt);
+	if (ret < 0) {
+		pr_err("Failed to send clk prepare request for %s, ret %d\n",
+					clk_hw_get_name(hw), ret);
+		goto err;
+	} else
+		/* success: mbox_send_message() returns a non-negative token */
+		ret = 0;
+
+	/* update the current clock level once the mailbox message is sent */
+	clk->level = rate;
+
+	clk->enabled = true;
+err:
+	mutex_unlock(&clk_aop_lock);
+
+	return ret;
+}
+
+static void clk_aop_qmp_unprepare(struct clk_hw *hw)
+{
+	char mbox_msg[MAX_LEN];
+	unsigned long rate;
+	int ret = 0;
+	struct qmp_pkt pkt;
+	struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+
+	mutex_lock(&clk_aop_lock);
+
+	rate = clk->disable_state;
+
+	snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %lu}",
+				clk->msg.class, clk->msg.res, rate);
+	pkt.size = MAX_LEN;
+	pkt.data = mbox_msg;
+
+	ret = mbox_send_message(clk->mbox, &pkt);
+	if (ret < 0) {
+		pr_err("Failed to send clk unprepare request for %s, ret %d\n",
+					clk_hw_get_name(hw), ret);
+		goto err;
+	}
+
+	clk->enabled = false;
+err:
+	mutex_unlock(&clk_aop_lock);
+}
+
+static int clk_aop_qmp_is_enabled(struct clk_hw *hw)
+{
+	struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
+
+	return clk->enabled;
+}
+
+static const struct clk_ops aop_qmp_clk_ops = {
+	.prepare	= clk_aop_qmp_prepare,
+	.unprepare	= clk_aop_qmp_unprepare,
+	.recalc_rate	= clk_aop_qmp_recalc_rate,
+	.set_rate	= clk_aop_qmp_set_rate,
+	.round_rate	= clk_aop_qmp_round_rate,
+	.is_enabled	= clk_aop_qmp_is_enabled,
+};
+
+DEFINE_CLK_AOP_QMP(qdss_qmp_clk, clock, qdss,
+		QDSS_CLK_LEVEL_DYNAMIC, QDSS_CLK_LEVEL_OFF);
+
+static struct clk_hw *aop_qmp_clk_hws[] = {
+	[QDSS_CLK] = &qdss_qmp_clk.hw,
+};
+
+static int qmp_update_client(struct clk_hw *hw, struct device *dev,
+		struct mbox_chan *mbox)
+{
+	struct clk_aop_qmp *clk_aop = to_aop_qmp_clk(hw);
+
+	/* Use mailbox client with blocking mode */
+	clk_aop->cl.dev = dev;
+	clk_aop->cl.tx_block = true;
+	clk_aop->cl.tx_tout = MBOX_TOUT_MS;
+	clk_aop->cl.knows_txdone = false;
+
+	if (mbox) {
+		clk_aop->mbox = mbox;
+		return 0;
+	}
+
+	/* Allocate mailbox channel */
+	mbox = clk_aop->mbox = mbox_request_channel(&clk_aop->cl, 0);
+	if (IS_ERR(clk_aop->mbox) && PTR_ERR(clk_aop->mbox) != -EPROBE_DEFER) {
+		dev_err(dev, "Failed to get mailbox channel %pK %ld\n",
+						mbox, PTR_ERR(mbox));
+		return PTR_ERR(clk_aop->mbox);
+	}
+
+	return 0;
+}
+
+static int aop_qmp_clk_probe(struct platform_device *pdev)
+{
+	struct clk *clk;
+	struct device_node *np = pdev->dev.of_node;
+	struct mbox_chan *mbox = NULL;
+	int num_clks = ARRAY_SIZE(aop_qmp_clk_hws);
+	int ret = 0, i = 0;
+
+	/*
+	 * Allocate the mbox channel for the first clock client. The same
+	 * channel is reused for the remaining clock clients.
+	 */
+	ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, mbox);
+	if (ret < 0)
+		return ret;
+
+	for (i = 1; i < num_clks; i++) {
+		ret = qmp_update_client(aop_qmp_clk_hws[i], &pdev->dev, mbox);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to update QMP client %d\n",
+							ret);
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < num_clks; i++) {
+		ret = clk_aop_qmp_prepare(aop_qmp_clk_hws[i]);
+		if (ret < 0)
+			goto fail;
+	}
+
+	for (i = 0; i < num_clks; i++) {
+		clk = devm_clk_register(&pdev->dev, aop_qmp_clk_hws[i]);
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			goto fail;
+		}
+	}
+
+	ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register clock provider\n");
+		goto fail;
+	}
+
+	dev_info(&pdev->dev, "Registered clocks with AOP\n");
+
+	return ret;
+fail:
+	mbox_free_channel(mbox);
+
+	return ret;
+}
+
+static const struct of_device_id aop_qmp_clk_of_match[] = {
+	{ .compatible = "qcom,aop-qmp-clk", },
+	{}
+};
+
+static struct platform_driver aop_qmp_clk_driver = {
+	.driver = {
+		.name = "qmp-aop-clk",
+		.of_match_table = aop_qmp_clk_of_match,
+	},
+	.probe = aop_qmp_clk_probe,
+};
+
+static int __init aop_qmp_clk_init(void)
+{
+	return platform_driver_register(&aop_qmp_clk_driver);
+}
+subsys_initcall(aop_qmp_clk_init);
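
For illustration, a minimal consumer-side sketch of voting for the QDSS clock this driver exposes through the common clock framework; the device pointer, the "qdss" clock-names entry and the requested level are assumptions for the example, not part of the patch. Each prepare or set_rate call ultimately becomes a "{class: clock, res: qdss, val: <level>}" string sent to the AOP over the QMP mailbox.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <dt-bindings/clock/qcom,aop-qmp.h>

	/*
	 * Hypothetical consumer; assumes the consumer's DT wires a clock
	 * named "qdss" to this provider's QDSS_CLK index.
	 */
	static int example_vote_qdss_clk(struct device *dev)
	{
		struct clk *qdss_clk;
		int ret;

		qdss_clk = devm_clk_get(dev, "qdss");
		if (IS_ERR(qdss_clk))
			return PTR_ERR(qdss_clk);

		/* prepare sends the enable_state level (QDSS_CLK_LEVEL_DYNAMIC) */
		ret = clk_prepare_enable(qdss_clk);
		if (ret)
			return ret;

		/* optionally request an explicit level instead of the default */
		return clk_set_rate(qdss_clk, QDSS_CLK_LEVEL_DYNAMIC);
	}
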
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 66e604e..b315236 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2480,6 +2480,20 @@
  *********************************************************************/
 static enum cpuhp_state hp_online;
 
+static int cpuhp_cpufreq_online(unsigned int cpu)
+{
+	cpufreq_online(cpu);
+
+	return 0;
+}
+
+static int cpuhp_cpufreq_offline(unsigned int cpu)
+{
+	cpufreq_offline(cpu);
+
+	return 0;
+}
+
 /**
  * cpufreq_register_driver - register a CPU Frequency driver
  * @driver_data: A struct cpufreq_driver containing the values#
@@ -2542,8 +2556,8 @@
 	}
 
 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
-					cpufreq_online,
-					cpufreq_offline);
+					cpuhp_cpufreq_online,
+					cpuhp_cpufreq_offline);
 	if (ret < 0)
 		goto err_if_unreg;
 	hp_online = ret;
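
Presumably the wrappers above exist because the hotplug core treats a negative return from a cpuhp startup callback as a reason to fail and roll back the CPU transition, so cpufreq's own error handling is kept internal. A sketch of that assumed callback contract, with do_domain_online() as a hypothetical stand-in:

	#include <linux/printk.h>

	static int do_domain_online(unsigned int cpu);	/* hypothetical helper */

	static int example_cpuhp_online(unsigned int cpu)
	{
		int ret = do_domain_online(cpu);

		if (ret)
			pr_warn("cpu%u: online work failed (%d), not blocking CPU\n",
				cpu, ret);

		return 0;	/* never veto the CPU bring-up */
	}
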
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index a4a1cfb..cd4fdfb 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -65,7 +65,7 @@
 	void __iomem *base;
 	void __iomem *global_base;
 	unsigned int mport;
-	unsigned int irq;
+	int irq;
 	const struct bwmon_spec *spec;
 	struct device *dev;
 	struct bw_hwmon hw;
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 932742e..24c461d 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -149,7 +149,8 @@
 
 		status = __gop_query32(sys_table_arg, gop32, &info, &size,
 				       &current_fb_base);
-		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+		if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+		    info->pixel_format != PIXEL_BLT_ONLY) {
 			/*
 			 * Systems that use the UEFI Console Splitter may
 			 * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@
 
 		status = __gop_query64(sys_table_arg, gop64, &info, &size,
 				       &current_fb_base);
-		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+		if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+		    info->pixel_format != PIXEL_BLT_ONLY) {
 			/*
 			 * Systems that use the UEFI Console Splitter may
 			 * provide multiple GOP devices, not all of which are
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b87d278..a336754 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1305,7 +1305,7 @@
 	if (!fence) {
 		event_free(gpu, event);
 		ret = -ENOMEM;
-		goto out_pm_put;
+		goto out_unlock;
 	}
 
 	gpu->event[event].fence = fence;
@@ -1345,6 +1345,7 @@
 	hangcheck_timer_reset(gpu);
 	ret = 0;
 
+out_unlock:
 	mutex_unlock(&gpu->lock);
 
 out_pm_put:
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6e793d9..6c2d643 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -78,7 +78,8 @@
 	.get_brightness = sde_backlight_device_get_brightness,
 };
 
-static int sde_backlight_setup(struct sde_connector *c_conn)
+static int sde_backlight_setup(struct sde_connector *c_conn,
+					struct drm_device *dev)
 {
 	struct backlight_device *bl_device;
 	struct backlight_properties props;
@@ -87,7 +88,7 @@
 	static int display_count;
 	char bl_node_name[BL_NODE_NAME_SIZE];
 
-	if (!c_conn) {
+	if (!c_conn || !dev || !dev->dev) {
 		SDE_ERROR("invalid param\n");
 		return -EINVAL;
 	} else if (c_conn->connector_type != DRM_MODE_CONNECTOR_DSI) {
@@ -104,7 +105,7 @@
 	props.brightness = bl_config->brightness_max_level;
 	snprintf(bl_node_name, BL_NODE_NAME_SIZE, "panel%u-backlight",
 							display_count);
-	bl_device = backlight_device_register(bl_node_name, c_conn->base.kdev,
+	bl_device = backlight_device_register(bl_node_name, dev->dev,
 			c_conn, &sde_backlight_device_ops, &props);
 	if (IS_ERR_OR_NULL(bl_device)) {
 		SDE_ERROR("Failed to register backlight: %ld\n",
@@ -779,7 +780,7 @@
 		goto error_cleanup_fence;
 	}
 
-	rc = sde_backlight_setup(c_conn);
+	rc = sde_backlight_setup(c_conn, dev);
 	if (rc) {
 		SDE_ERROR("failed to setup backlight, rc=%d\n", rc);
 		goto error_cleanup_fence;
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index e7a867f..a44dd68 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1459,7 +1459,7 @@
 				continue;
 
 			cstate->rsc_client =
-				sde_encoder_update_rsc_client(encoder, true);
+				sde_encoder_get_rsc_client(encoder);
 		}
 		cstate->rsc_update = true;
 	}
@@ -1796,7 +1796,6 @@
 		if (encoder->crtc != crtc)
 			continue;
 		sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
-		sde_encoder_update_rsc_client(encoder, false);
 		cstate->rsc_client = NULL;
 		cstate->rsc_update = false;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 67fb783..8c41b12 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -656,6 +656,71 @@
 	return ret;
 }
 
+static int sde_encoder_update_rsc_client(
+		struct drm_encoder *drm_enc, bool enable)
+{
+	struct sde_encoder_virt *sde_enc;
+	enum sde_rsc_state rsc_state;
+	struct sde_rsc_cmd_config rsc_config;
+	int ret;
+	struct msm_display_info *disp_info;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	disp_info = &sde_enc->disp_info;
+
+	/**
+	 * only primary command mode panel can request CMD state.
+	 * all other panels/displays can request for VID state including
+	 * secondary command mode panel.
+	 */
+	rsc_state = enable ?
+		(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
+		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
+		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
+
+	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update
+					&& disp_info->is_primary) {
+		rsc_config.fps = disp_info->frame_rate;
+		rsc_config.vtotal = disp_info->vtotal;
+		rsc_config.prefill_lines = disp_info->prefill_lines;
+		rsc_config.jitter = disp_info->jitter;
+		/* update it only once */
+		sde_enc->rsc_state_update = true;
+
+		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
+			rsc_state, &rsc_config,
+			drm_enc->crtc ? drm_enc->crtc->index : -1);
+	} else {
+		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
+			rsc_state, NULL,
+			drm_enc->crtc ? drm_enc->crtc->index : -1);
+	}
+
+	if (ret)
+		SDE_ERROR("sde rsc client update failed ret:%d\n", ret);
+
+	return ret;
+}
+
+struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct msm_display_info *disp_info;
+
+	if (!drm_enc)
+		return NULL;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	disp_info = &sde_enc->disp_info;
+
+	return disp_info->is_primary ? sde_enc->rsc_client : NULL;
+}
+
 static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 				      struct drm_display_mode *mode,
 				      struct drm_display_mode *adj_mode)
@@ -778,6 +843,8 @@
 		}
 	}
 
+	sde_encoder_update_rsc_client(drm_enc, true);
+
 	if (!sde_enc->cur_master)
 		SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
 	else if (sde_enc->cur_master->ops.enable)
@@ -833,6 +900,8 @@
 		del_timer_sync(&sde_enc->frame_done_timer);
 	}
 
+	sde_encoder_update_rsc_client(drm_enc, false);
+
 	if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
 		sde_enc->cur_master->ops.disable(sde_enc->cur_master);
 
@@ -927,57 +996,6 @@
 	}
 }
 
-struct sde_rsc_client *sde_encoder_update_rsc_client(
-		struct drm_encoder *drm_enc, bool enable)
-{
-	struct sde_encoder_virt *sde_enc;
-	enum sde_rsc_state rsc_state;
-	struct sde_rsc_cmd_config rsc_config;
-	int ret;
-	struct msm_display_info *disp_info;
-
-	if (!drm_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return NULL;
-	}
-
-	sde_enc = to_sde_encoder_virt(drm_enc);
-	disp_info = &sde_enc->disp_info;
-
-	/**
-	 * only primary command mode panel can request CMD state.
-	 * all other panels/displays can request for VID state including
-	 * secondary command mode panel.
-	 */
-	rsc_state = enable ?
-		(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
-		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
-		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
-
-	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update
-					&& disp_info->is_primary) {
-		rsc_config.fps = disp_info->frame_rate;
-		rsc_config.vtotal = disp_info->vtotal;
-		rsc_config.prefill_lines = disp_info->prefill_lines;
-		rsc_config.jitter = disp_info->jitter;
-		/* update it only once */
-		sde_enc->rsc_state_update = true;
-
-		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
-			rsc_state, &rsc_config,
-			drm_enc->crtc ? drm_enc->crtc->index : -1);
-	} else {
-		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
-			rsc_state, NULL,
-			drm_enc->crtc ? drm_enc->crtc->index : -1);
-	}
-
-	if (ret)
-		SDE_ERROR("sde rsc client update failed ret:%d\n", ret);
-
-	return sde_enc->disp_info.is_primary ? sde_enc->rsc_client : NULL;
-}
-
 void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
 		void (*frame_event_cb)(void *, u32 event),
 		void *frame_event_cb_data)
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index cdecd08..5795e04 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -83,13 +83,11 @@
 		void (*cb)(void *, u32), void *data);
 
 /**
- * sde_encoder_update_rsc_client - updates the rsc client state for primary
+ * sde_encoder_get_rsc_client - gets the rsc client
  *      for primary display.
  * @encoder:	encoder pointer
- * @enable:	enable/disable the client
  */
-struct sde_rsc_client *sde_encoder_update_rsc_client(
-		struct drm_encoder *encoder, bool enable);
+struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *encoder);
 
 /**
  * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_hwio.h b/drivers/gpu/drm/msm/sde/sde_hw_hwio.h
deleted file mode 100644
index e69de29..0000000
--- a/drivers/gpu/drm/msm/sde/sde_hw_hwio.h
+++ /dev/null
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 98c59c3..2393e61 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -348,9 +348,16 @@
 static void sde_kms_prepare_commit(struct msm_kms *kms,
 		struct drm_atomic_state *state)
 {
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
+	struct sde_kms *sde_kms;
+	struct msm_drm_private *priv;
+
+	if (!kms)
+		return;
+	sde_kms = to_sde_kms(kms);
+
+	if (!sde_kms->dev || !sde_kms->dev->dev_private)
+		return;
+	priv = sde_kms->dev->dev_private;
 
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
 }
@@ -373,13 +380,20 @@
 static void sde_kms_complete_commit(struct msm_kms *kms,
 		struct drm_atomic_state *old_state)
 {
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
+	struct sde_kms *sde_kms;
+	struct msm_drm_private *priv;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
 	int i;
 
+	if (!kms || !old_state)
+		return;
+	sde_kms = to_sde_kms(kms);
+
+	if (!sde_kms->dev || !sde_kms->dev->dev_private)
+		return;
+	priv = sde_kms->dev->dev_private;
+
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
 		sde_crtc_complete_commit(crtc, old_crtc_state);
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
@@ -1515,7 +1529,7 @@
 	}
 
 	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
-			&priv->phandle, priv->pclient, "core_clk_src");
+			&priv->phandle, priv->pclient, "core_clk");
 	if (rc) {
 		SDE_ERROR("failed to init perf %d\n", rc);
 		goto perf_err;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e0d7f84..d741ff8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@
 	.i2c = nv04_i2c_new,
 	.imem = nv40_instmem_new,
 	.mc = nv44_mc_new,
-	.mmu = nv44_mmu_new,
+	.mmu = nv04_mmu_new,
 	.pci = nv40_pci_new,
 	.therm = nv40_therm_new,
 	.timer = nv41_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fbb8c7d..0d65e7f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -433,8 +433,6 @@
 	case 0x94:
 	case 0x96:
 	case 0x98:
-	case 0xaa:
-	case 0xac:
 		return true;
 	default:
 		break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 003ac91..8a88952 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@
 		}
 
 		if (type == 0x00000010) {
-			if (!nv31_mpeg_mthd(mpeg, mthd, data))
+			if (nv31_mpeg_mthd(mpeg, mthd, data))
 				show &= ~0x01000000;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index e536f37..c3cf02e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@
 		}
 
 		if (type == 0x00000010) {
-			if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+			if (nv44_mpeg_mthd(subdev->device, mthd, data))
 				show &= ~0x01000000;
 		}
 	}
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index fddfb2c..28d93a9 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -718,9 +718,9 @@
 #define A6XX_GMU_GENERAL_7			0x1F9CC
 
 #define A6XX_GMU_AO_INTERRUPT_EN		0x23B03
-#define A6XX_GMU_HOST_INTERRUPT_CLR		0x23B04
-#define A6XX_GMU_HOST_INTERRUPT_STATUS		0x23B05
-#define A6XX_GMU_HOST_INTERRUPT_MASK		0x23B06
+#define A6XX_GMU_AO_HOST_INTERRUPT_CLR		0x23B04
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS	0x23B05
+#define A6XX_GMU_AO_HOST_INTERRUPT_MASK		0x23B06
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
@@ -764,7 +764,6 @@
 #define PDC_GPU_TCS1_CMD0_MSGID			0x21575
 #define PDC_GPU_TCS1_CMD0_ADDR			0x21576
 #define PDC_GPU_TCS1_CMD0_DATA			0x21577
-#define PDC_GPU_TIMESTAMP_UNIT1_EN_DRV0		0x23489
 #define PDC_GPU_SEQ_MEM_0			0xA0000
 
 #endif /* _A6XX_REG_H */
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 75d5587..530529f 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -618,9 +618,9 @@
 	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 	ADRENO_REG_VBIF_VERSION,
 	ADRENO_REG_GMU_AO_INTERRUPT_EN,
-	ADRENO_REG_GMU_HOST_INTERRUPT_CLR,
-	ADRENO_REG_GMU_HOST_INTERRUPT_STATUS,
-	ADRENO_REG_GMU_HOST_INTERRUPT_MASK,
+	ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
+	ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
+	ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
 	ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
 	ADRENO_REG_GMU_AHB_FENCE_STATUS,
 	ADRENO_REG_GMU_RPMH_POWER_STATE,
@@ -629,6 +629,7 @@
 	ADRENO_REG_GMU_HFI_SFR_ADDR,
 	ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
 	ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
+	ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
 	ADRENO_REG_GMU_HOST2GMU_INTR_SET,
 	ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
 	ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 944faa3..6609357 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -504,15 +504,6 @@
 	__raw_writel(value, reg);
 }
 
-static void _gmu_regrmw(struct kgsl_device *device,
-		unsigned int offsetwords, unsigned int mask)
-{
-	unsigned int value;
-
-	kgsl_gmu_regread(device, offsetwords, &value);
-	kgsl_gmu_regwrite(device, offsetwords, value | mask);
-}
-
 /*
  * _load_gmu_rpmh_ucode() - Load the ucode into the GPU PDC/RSC blocks
  * PDC and RSC execute GPU power on/off RPMh sequence
@@ -651,22 +642,25 @@
 	/* Configure registers for idle setting. The setting is cumulative */
 	switch (gmu->idle_level) {
 	case GPU_HW_MIN_VOLT:
-		_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, MIN_BW_ENABLE_MASK);
-		_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, MIN_BW_HYST);
+		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
+				MIN_BW_ENABLE_MASK);
+		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, 0,
+				MIN_BW_HYST);
 		/* fall through */
 	case GPU_HW_NAP:
-		_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, HW_NAP_ENABLE_MASK);
+		kgsl_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, 0,
+				HW_NAP_ENABLE_MASK);
 		/* fall through */
 	case GPU_HW_IFPC:
 		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
 				0x000A0080);
-		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
+		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 				IFPC_ENABLE_MASK);
 		/* fall through */
 	case GPU_HW_SPTP_PC:
 		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
 				0x000A0080);
-		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
+		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 				SPTP_ENABLE_MASK);
 		/* fall through */
 	default:
@@ -675,11 +669,13 @@
 
 	/* ACD feature enablement */
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
-		_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, BIT(10));
+		kgsl_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, 0,
+				BIT(10));
 
 	/* Enable RPMh GPU client */
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
-		_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, RPMH_ENABLE_MASK);
+		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0,
+				RPMH_ENABLE_MASK);
 
 	/* Disable reference bandgap voltage */
 	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
@@ -721,9 +717,8 @@
 {
 	struct gmu_device *gmu = &device->gmu;
 
-	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK,
-			(HFI_IRQ_MASK & (~HFI_IRQ_MSGQ_MASK)));
-
+	kgsl_gmu_regrmw(device, A6XX_GMU_GMU2HOST_INTR_MASK,
+			HFI_IRQ_MSGQ_MASK, 0);
 	kgsl_gmu_regwrite(device, A6XX_GMU_HFI_CTRL_INIT, 1);
 
 	if (timed_poll_check(device,
@@ -1092,7 +1087,7 @@
 	ret = a6xx_hm_sptprac_control(device, false);
 
 	/* RSC sleep sequence */
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TIMESTAMP_UNIT1_EN_DRV0, 1);
+	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
 	wmb();
 
@@ -2053,12 +2048,12 @@
 				A6XX_GMU_ALWAYS_ON_COUNTER_H),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
 				A6XX_GMU_AO_INTERRUPT_EN),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_CLR,
-				A6XX_GMU_HOST_INTERRUPT_CLR),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_STATUS,
-				A6XX_GMU_HOST_INTERRUPT_STATUS),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST_INTERRUPT_MASK,
-				A6XX_GMU_HOST_INTERRUPT_MASK),
+	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
+				A6XX_GMU_AO_HOST_INTERRUPT_CLR),
+	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
+				A6XX_GMU_AO_HOST_INTERRUPT_STATUS),
+	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+				A6XX_GMU_AO_HOST_INTERRUPT_MASK),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
 				A6XX_GMU_GMU_PWR_COL_KEEPALIVE),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AHB_FENCE_STATUS,
@@ -2075,6 +2070,8 @@
 				A6XX_GMU_GMU2HOST_INTR_CLR),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
 				A6XX_GMU_GMU2HOST_INTR_INFO),
+	ADRENO_REG_DEFINE(ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+				A6XX_GMU_GMU2HOST_INTR_MASK),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_SET,
 				A6XX_GMU_HOST2GMU_INTR_SET),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 15157b76..ba83cd7 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -221,12 +221,12 @@
 
 static const unsigned int a6xx_registers[] = {
 	/* RBBM */
-	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0014, 0x0014,
-	0x0018, 0x001B, 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042,
-	0x0044, 0x0044, 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE,
-	0x00B0, 0x00FB, 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213,
-	0x0218, 0x023D, 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B,
-	0x050E, 0x0511, 0x0533, 0x0533, 0x0540, 0x0555,
+	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B,
+	0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044,
+	0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB,
+	0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D,
+	0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511,
+	0x0533, 0x0533, 0x0540, 0x0555,
 	/* CP */
 	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
 	0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
@@ -240,8 +240,8 @@
 	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
 	0x0E38, 0x0E39,
 	/* GRAS */
-	0x8600, 0x8601, 0x8604, 0x8605, 0x8610, 0x861B, 0x8620, 0x8620,
-	0x8628, 0x862B, 0x8630, 0x8637,
+	0x8600, 0x8601, 0x8610, 0x861B, 0x8620, 0x8620, 0x8628, 0x862B,
+	0x8630, 0x8637,
 	/* RB */
 	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
 	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
@@ -254,7 +254,7 @@
 	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
 	/* VFD */
 	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
-	0xA630, 0xA630, 0xD200, 0xD263,
+	0xA630, 0xA630,
 };
 
 enum a6xx_debugbus_id {
@@ -583,8 +583,9 @@
 				struct kgsl_snapshot *snapshot)
 {
 	unsigned int pool_size;
+	u8 *buf = snapshot->ptr;
 
-	/* Save the mempool size to 0 to stabilize it while dumping */
+	/* Set the mempool size to 0 to stabilize it while dumping */
 	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
 	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
 
@@ -592,6 +593,22 @@
 		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
 		0, 0x2060);
 
+	/*
+	 * Data at offset 0x2000 in the mempool section is the mempool size.
+	 * Since we set it to 0, patch in the original size so that the data
+	 * is consistent.
+	 */
+	if (buf < snapshot->ptr) {
+		unsigned int *data;
+
+		/* Skip over the headers */
+		buf += sizeof(struct kgsl_snapshot_section_header) +
+				sizeof(struct kgsl_snapshot_indexed_regs);
+
+		data = (unsigned int *)buf + 0x2000;
+		*data = pool_size;
+	}
+
 	/* Restore the saved mempool size */
 	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
 }
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index bf31c00..d955aa0 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -569,6 +569,17 @@
 	device->ftbl->regwrite(device, offsetwords, val | bits);
 }
 
+static inline void kgsl_gmu_regrmw(struct kgsl_device *device,
+		unsigned int offsetwords,
+		unsigned int mask, unsigned int bits)
+{
+	unsigned int val = 0;
+
+	kgsl_gmu_regread(device, offsetwords, &val);
+	val &= ~mask;
+	kgsl_gmu_regwrite(device, offsetwords, val | bits);
+}
+
 static inline int kgsl_idle(struct kgsl_device *device)
 {
 	return device->ftbl->idle(device);
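
kgsl_gmu_regrmw() clears the bits in 'mask' before OR-ing in 'bits', which is what lets the a6xx callers above express both a pure set (mask = 0, as the old _gmu_regrmw() did) and a selective clear (bits = 0). A small sketch of the two call shapes, reusing register and mask names from this patch and assuming the usual kgsl/a6xx headers are included:

	/* Illustrative only: the two call patterns enabled by the new helper. */
	static void example_gmu_rmw(struct kgsl_device *device)
	{
		/* set-only: clear nothing, OR in the RPMh enable bits */
		kgsl_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, 0, RPMH_ENABLE_MASK);

		/* clear-only: clear just the MSGQ bit of the GMU2HOST mask register */
		kgsl_gmu_regrmw(device, A6XX_GMU_GMU2HOST_INTR_MASK,
				HFI_IRQ_MSGQ_MASK, 0);
	}
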
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 17c10eb..416085f 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -748,44 +748,49 @@
 {
 	struct gmu_device *gmu = data;
 	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
-	struct kgsl_hfi *hfi = &gmu->hfi;
 	unsigned int status = 0;
 
-	if (irq == gmu->gmu_interrupt_num) {
-		adreno_read_gmureg(ADRENO_DEVICE(device),
-				ADRENO_REG_GMU_HOST_INTERRUPT_STATUS,
-				&status);
+	adreno_read_gmureg(ADRENO_DEVICE(device),
+			ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS, &status);
+	adreno_write_gmureg(ADRENO_DEVICE(device),
+			ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);
 
-		/* Ignore GMU_INT_RSCC_COMP interrupts */
-		if (status & GMU_INT_WDOG_BITE)
-			dev_err_ratelimited(&gmu->pdev->dev,
-					"GMU watchdog expired interrupt\n");
-		if (status & GMU_INT_DBD_WAKEUP)
-			dev_err_ratelimited(&gmu->pdev->dev,
-					"GMU doorbell interrupt received\n");
-		if (status & GMU_INT_HOST_AHB_BUS_ERR)
-			dev_err_ratelimited(&gmu->pdev->dev,
-					"AHB bus error interrupt received\n");
+	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD_WAKEUP interrupts */
+	if (status & GMU_INT_WDOG_BITE)
+		dev_err_ratelimited(&gmu->pdev->dev,
+				"GMU watchdog expired interrupt received\n");
+	if (status & GMU_INT_HOST_AHB_BUS_ERR)
+		dev_err_ratelimited(&gmu->pdev->dev,
+				"AHB bus error interrupt received\n");
+	if (status & ~GMU_AO_INT_MASK)
+		dev_err_ratelimited(&gmu->pdev->dev,
+				"Unhandled GMU interrupts 0x%x\n",
+				status & ~GMU_AO_INT_MASK);
 
-		adreno_write_gmureg(ADRENO_DEVICE(device),
-				ADRENO_REG_GMU_HOST_INTERRUPT_CLR,
-				status);
-	} else {
-		adreno_read_gmureg(ADRENO_DEVICE(device),
-				ADRENO_REG_GMU_GMU2HOST_INTR_INFO,
-				&status);
-		adreno_write_gmureg(ADRENO_DEVICE(device),
-				ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
-				status);
+	return IRQ_HANDLED;
+}
 
-		if (status & HFI_IRQ_MASK) {
-			if (status & HFI_IRQ_MSGQ_MASK)
-				tasklet_hi_schedule(&hfi->tasklet);
-		} else
-			dev_err_ratelimited(&gmu->pdev->dev,
-					"Unhandled GMU interrupts %x\n",
-					status);
-	}
+static irqreturn_t hfi_irq_handler(int irq, void *data)
+{
+	struct kgsl_hfi *hfi = data;
+	struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
+	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
+	unsigned int status = 0;
+
+	adreno_read_gmureg(ADRENO_DEVICE(device),
+			ADRENO_REG_GMU_GMU2HOST_INTR_INFO, &status);
+	adreno_write_gmureg(ADRENO_DEVICE(device),
+			ADRENO_REG_GMU_GMU2HOST_INTR_CLR, status);
+
+	if (status & HFI_IRQ_MSGQ_MASK)
+		tasklet_hi_schedule(&hfi->tasklet);
+	if (status & HFI_IRQ_CM3_FAULT_MASK)
+		dev_err_ratelimited(&gmu->pdev->dev,
+				"GMU CM3 fault interrupt received\n");
+	if (status & ~HFI_IRQ_MASK)
+		dev_err_ratelimited(&gmu->pdev->dev,
+				"Unhandled HFI interrupts 0x%x\n",
+				status & ~HFI_IRQ_MASK);
 
 	return IRQ_HANDLED;
 }
@@ -978,6 +983,82 @@
 	return 0;
 }
 
+static int gmu_irq_probe(struct gmu_device *gmu)
+{
+	int ret;
+	struct kgsl_hfi *hfi = &gmu->hfi;
+
+	hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
+			"kgsl_hfi_irq");
+	ret = devm_request_irq(&gmu->pdev->dev,
+			hfi->hfi_interrupt_num,
+			hfi_irq_handler, IRQF_TRIGGER_HIGH,
+			"HFI", hfi);
+	if (ret) {
+		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
+				hfi->hfi_interrupt_num, ret);
+		return ret;
+	}
+
+	gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
+			"kgsl_gmu_irq");
+	ret = devm_request_irq(&gmu->pdev->dev,
+			gmu->gmu_interrupt_num,
+			gmu_irq_handler, IRQF_TRIGGER_HIGH,
+			"GMU", gmu);
+	if (ret)
+		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
+				gmu->gmu_interrupt_num, ret);
+
+	return ret;
+}
+
+static void gmu_irq_enable(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct gmu_device *gmu = &device->gmu;
+	struct kgsl_hfi *hfi = &gmu->hfi;
+
+	/* Clear any pending IRQs before unmasking on GMU */
+	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
+			0xFFFFFFFF);
+	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
+			0xFFFFFFFF);
+
+	/* Unmask needed IRQs on GMU */
+	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+			(unsigned int) ~HFI_IRQ_MASK);
+	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+			(unsigned int) ~GMU_AO_INT_MASK);
+
+	/* Enable all IRQs on host */
+	enable_irq(hfi->hfi_interrupt_num);
+	enable_irq(gmu->gmu_interrupt_num);
+}
+
+static void gmu_irq_disable(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct gmu_device *gmu = &device->gmu;
+	struct kgsl_hfi *hfi = &gmu->hfi;
+
+	/* Disable all IRQs on host */
+	disable_irq(gmu->gmu_interrupt_num);
+	disable_irq(hfi->hfi_interrupt_num);
+
+	/* Mask all IRQs on GMU */
+	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
+			0xFFFFFFFF);
+	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_MASK,
+			0xFFFFFFFF);
+
+	/* Clear any pending IRQs before disabling */
+	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
+			0xFFFFFFFF);
+	adreno_write_gmureg(adreno_dev, ADRENO_REG_GMU_GMU2HOST_INTR_CLR,
+			0xFFFFFFFF);
+}
+
 /* Do not access any GMU registers in GMU probe function */
 int gmu_probe(struct kgsl_device *device)
 {
@@ -1024,32 +1105,13 @@
 
 	gmu->gmu2gpu_offset = (gmu->reg_phys - device->reg_phys) >> 2;
 
-	/* Initialize HFI GMU interrupts */
-	hfi->hfi_interrupt_num = platform_get_irq_byname(gmu->pdev,
-						"kgsl_hfi_irq");
-	ret = devm_request_irq(&gmu->pdev->dev,
-				  hfi->hfi_interrupt_num,
-				  gmu_irq_handler, IRQF_TRIGGER_HIGH,
-				  "GMU", gmu);
-	if (ret) {
-		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
-			      hfi->hfi_interrupt_num, ret);
+	/* Initialize HFI and GMU interrupts */
+	ret = gmu_irq_probe(gmu);
+	if (ret)
 		goto error;
-	}
-
-	gmu->gmu_interrupt_num = platform_get_irq_byname(gmu->pdev,
-						"kgsl_gmu_irq");
-	ret = devm_request_irq(&gmu->pdev->dev,
-				  gmu->gmu_interrupt_num,
-				  gmu_irq_handler, IRQF_TRIGGER_HIGH,
-				  "GMU", gmu);
-	if (ret) {
-		dev_err(&gmu->pdev->dev, "request_irq(%d) failed: %d\n",
-			      gmu->gmu_interrupt_num, ret);
-		goto error;
-	}
 
 	/* Don't enable GMU interrupts until GMU started */
+	/* We cannot use gmu_irq_disable because it writes registers */
 	disable_irq(gmu->gmu_interrupt_num);
 	disable_irq(hfi->hfi_interrupt_num);
 
@@ -1199,7 +1261,6 @@
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	struct gmu_device *gmu = &device->gmu;
-	struct kgsl_hfi *hfi = &gmu->hfi;
 	int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
 
 	if (!kgsl_gmu_isenabled(device))
@@ -1234,8 +1295,7 @@
 		if (ret)
 			goto error_bus;
 
-		enable_irq(hfi->hfi_interrupt_num);
-		enable_irq(gmu->gmu_interrupt_num);
+		gmu_irq_enable(device);
 
 		ret = hfi_start(gmu, GMU_COLD_BOOT);
 		if (ret)
@@ -1253,8 +1313,7 @@
 		if (ret)
 			goto error_clks;
 
-		enable_irq(hfi->hfi_interrupt_num);
-		enable_irq(gmu->gmu_interrupt_num);
+		gmu_irq_enable(device);
 
 		ret = hfi_start(gmu, GMU_WARM_BOOT);
 		if (ret)
@@ -1292,8 +1351,7 @@
 
 error_gpu:
 	hfi_stop(gmu);
-	disable_irq(gmu->gmu_interrupt_num);
-	disable_irq(hfi->hfi_interrupt_num);
+	gmu_irq_disable(device);
 	if (device->state == KGSL_STATE_INIT ||
 			device->state == KGSL_STATE_SUSPEND) {
 		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
@@ -1315,7 +1373,6 @@
 void gmu_stop(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = &device->gmu;
-	struct kgsl_hfi *hfi = &gmu->hfi;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
@@ -1333,9 +1390,7 @@
 	/* Pending message in all queues are abandoned */
 	hfi_stop(gmu);
 	clear_bit(GMU_HFI_ON, &gmu->flags);
-
-	disable_irq(gmu->gmu_interrupt_num);
-	disable_irq(hfi->hfi_interrupt_num);
+	gmu_irq_disable(device);
 
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
 	gmu_disable_clks(gmu);
@@ -1358,6 +1413,7 @@
 	tasklet_kill(&hfi->tasklet);
 
 	gmu_stop(device);
+	gmu_irq_disable(device);
 
 	while ((i < MAX_GMU_CLKS) && gmu->clks[i]) {
 		gmu->clks[i] = NULL;
@@ -1365,14 +1421,12 @@
 	}
 
 	if (gmu->gmu_interrupt_num) {
-		disable_irq(gmu->gmu_interrupt_num);
 		devm_free_irq(&gmu->pdev->dev,
 				gmu->gmu_interrupt_num, gmu);
 		gmu->gmu_interrupt_num = 0;
 	}
 
 	if (hfi->hfi_interrupt_num) {
-		disable_irq(hfi->hfi_interrupt_num);
 		devm_free_irq(&gmu->pdev->dev,
 				hfi->hfi_interrupt_num, gmu);
 		hfi->hfi_interrupt_num = 0;
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index ac2c151..a2ca67c 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -26,7 +26,6 @@
 #define GMU_INT_HOST_AHB_BUS_ERR	BIT(5)
 #define GMU_AO_INT_MASK		\
 		(GMU_INT_WDOG_BITE |	\
-		GMU_INT_DBD_WAKEUP |	\
 		GMU_INT_HOST_AHB_BUS_ERR)
 
 #define MAX_GMUFW_SIZE	0x2000	/* in dwords */
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 39b513e..83abec4 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -38,16 +38,15 @@
 #define GMU_QUEUE_START_ADDR(hfi_mem, i) \
 	((hfi_mem)->gmuaddr + HFI_QUEUE_OFFSET(i))
 
-#define HFI_IRQ_MSGQ_MASK		0x1
-#define HFI_IRQ_DBGQ_MASK		0x2
-#define HFI_IRQ_BLOCKED_MSG_MASK	0x4
-#define HFI_IRQ_GMU_ERR_MASK		0xFF0000
-#define HFI_IRQ_OOB_MASK		0xFF000000
-#define HFI_IRQ_MASK  (HFI_IRQ_MSGQ_MASK |\
-			HFI_IRQ_DBGQ_MASK |\
-			HFI_IRQ_BLOCKED_MSG_MASK |\
-			HFI_IRQ_GMU_ERR_MASK |\
-			HFI_IRQ_OOB_MASK)
+#define HFI_IRQ_MSGQ_MASK		BIT(0)
+#define HFI_IRQ_DBGQ_MASK		BIT(1)
+#define HFI_IRQ_BLOCKED_MSG_MASK	BIT(2)
+#define HFI_IRQ_CM3_FAULT_MASK		BIT(23)
+#define HFI_IRQ_GMU_ERR_MASK		GENMASK(22, 16)
+#define HFI_IRQ_OOB_MASK		GENMASK(31, 24)
+#define HFI_IRQ_MASK			(HFI_IRQ_MSGQ_MASK |\
+					HFI_IRQ_CM3_FAULT_MASK)
+
 /**
  * struct hfi_queue_table_header - HFI queue table structure
  * @version: HFI protocol version
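
A quick cross-check of the reworked mask values (arithmetic only, not part of the patch): BIT(23) is carved out of the old 0xFF0000 error field as the CM3 fault bit, so HFI_IRQ_GMU_ERR_MASK becomes 0x7F0000, while the MSGQ, DBGQ, blocked-message and OOB values are unchanged; the combined HFI_IRQ_MASK now covers only the message-queue and CM3-fault bits.

	/*
	 * Numeric expansion of the new macros; the EX_ prefix marks these as
	 * illustrative copies rather than definitions used by the driver.
	 */
	#define EX_HFI_IRQ_MSGQ_MASK		0x00000001u	/* BIT(0)          */
	#define EX_HFI_IRQ_DBGQ_MASK		0x00000002u	/* BIT(1)          */
	#define EX_HFI_IRQ_BLOCKED_MSG_MASK	0x00000004u	/* BIT(2)          */
	#define EX_HFI_IRQ_CM3_FAULT_MASK	0x00800000u	/* BIT(23)         */
	#define EX_HFI_IRQ_GMU_ERR_MASK		0x007F0000u	/* GENMASK(22, 16) */
	#define EX_HFI_IRQ_OOB_MASK		0xFF000000u	/* GENMASK(31, 24) */
	#define EX_HFI_IRQ_MASK			0x00800001u	/* MSGQ | CM3      */
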
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 673689c..69511cd 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -159,11 +159,6 @@
 	TPDM_SUPPORT_TYPE_NO,
 };
 
-enum tpdm_cmb_mode {
-	TPDM_CMB_MODE_CONTINUOUS,
-	TPDM_CMB_MODE_TRACE_ON_CHANGE,
-};
-
 enum tpdm_cmb_patt_bits {
 	TPDM_CMB_LSB,
 	TPDM_CMB_MSB,
@@ -234,7 +229,8 @@
 };
 
 struct cmb_dataset {
-	enum tpdm_cmb_mode	mode;
+	bool			trace_mode;
+	uint32_t		cycle_acc;
 	uint32_t		patt_val[TPDM_CMB_PATT_CMP];
 	uint32_t		patt_mask[TPDM_CMB_PATT_CMP];
 	bool			patt_ts;
@@ -528,24 +524,18 @@
 static void __tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
 {
 	uint32_t val;
+	int i;
 
-	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_LSB],
-		    TPDM_CMB_TPR(TPDM_CMB_LSB));
-	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_LSB],
-		    TPDM_CMB_TPMR(TPDM_CMB_LSB));
-	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_MSB],
-		    TPDM_CMB_TPR(TPDM_CMB_MSB));
-	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_MSB],
-		    TPDM_CMB_TPMR(TPDM_CMB_MSB));
-
-	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_LSB],
-		    TPDM_CMB_XPR(TPDM_CMB_LSB));
-	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB],
-		    TPDM_CMB_XPMR(TPDM_CMB_LSB));
-	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_MSB],
-		    TPDM_CMB_XPR(TPDM_CMB_MSB));
-	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB],
-		    TPDM_CMB_XPMR(TPDM_CMB_MSB));
+	for (i = 0; i < TPDM_CMB_PATT_CMP; i++) {
+		tpdm_writel(drvdata, drvdata->cmb->patt_val[i],
+			    TPDM_CMB_TPR(i));
+		tpdm_writel(drvdata, drvdata->cmb->patt_mask[i],
+			    TPDM_CMB_TPMR(i));
+		tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[i],
+			    TPDM_CMB_XPR(i));
+		tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[i],
+			    TPDM_CMB_XPMR(i));
+	}
 
 	val = tpdm_readl(drvdata, TPDM_CMB_TIER);
 	if (drvdata->cmb->patt_ts == true)
@@ -563,10 +553,13 @@
 	val = tpdm_readl(drvdata, TPDM_CMB_CR);
 	/* Set the flow control bit */
 	val = val & ~BIT(2);
-	if (drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS)
-		val = val & ~BIT(1);
-	else
+	if (drvdata->cmb->trace_mode)
 		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+
+	val = val & ~BM(8, 9);
+	val = val | BMVAL(drvdata->cmb->cycle_acc, 0, 1) << 8;
 	tpdm_writel(drvdata, val, TPDM_CMB_CR);
 	/* Set the enable bit */
 	val = val | BIT(0);
@@ -577,24 +570,18 @@
 {
 	uint32_t val;
 	struct mcmb_dataset *mcmb = drvdata->cmb->mcmb;
+	int i;
 
-	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_LSB],
-		    TPDM_CMB_TPR(TPDM_CMB_LSB));
-	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_LSB],
-		    TPDM_CMB_TPMR(TPDM_CMB_LSB));
-	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_MSB],
-		    TPDM_CMB_TPR(TPDM_CMB_MSB));
-	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_MSB],
-		    TPDM_CMB_TPMR(TPDM_CMB_MSB));
-
-	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_LSB],
-		    TPDM_CMB_XPR(TPDM_CMB_LSB));
-	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB],
-		    TPDM_CMB_XPMR(TPDM_CMB_LSB));
-	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_MSB],
-		    TPDM_CMB_XPR(TPDM_CMB_MSB));
-	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB],
-		    TPDM_CMB_XPMR(TPDM_CMB_MSB));
+	for (i = 0; i < TPDM_CMB_PATT_CMP; i++) {
+		tpdm_writel(drvdata, drvdata->cmb->patt_val[i],
+			    TPDM_CMB_TPR(i));
+		tpdm_writel(drvdata, drvdata->cmb->patt_mask[i],
+			    TPDM_CMB_TPMR(i));
+		tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[i],
+			    TPDM_CMB_XPR(i));
+		tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[i],
+			    TPDM_CMB_XPMR(i));
+	}
 
 	val = tpdm_readl(drvdata, TPDM_CMB_TIER);
 	if (drvdata->cmb->patt_ts == true)
@@ -612,14 +599,17 @@
 	val = tpdm_readl(drvdata, TPDM_CMB_CR);
 	/* Set the flow control bit */
 	val = val & ~BIT(2);
-	if (drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS)
-		val = val & ~BIT(1);
-	else
+	if (drvdata->cmb->trace_mode)
 		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
 
-	val = val | (BMVAL(mcmb->mcmb_trig_lane, 0, 3) << 18);
-
-	val = val | (mcmb->mcmb_lane_select << 10);
+	val = val & ~BM(8, 9);
+	val = val | BMVAL(drvdata->cmb->cycle_acc, 0, 1) << 8;
+	val = val & ~BM(18, 20);
+	val = val | (BMVAL(mcmb->mcmb_trig_lane, 0, 2) << 18);
+	val = val & ~BM(10, 17);
+	val = val | (BMVAL(mcmb->mcmb_lane_select, 0, 7) << 10);
 
 	tpdm_writel(drvdata, val, TPDM_CMB_CR);
 	/* Set the enable bit */
@@ -722,7 +712,8 @@
 	if (test_bit(TPDM_DS_DSB, drvdata->enable_ds))
 		__tpdm_disable_dsb(drvdata);
 
-	if (test_bit(TPDM_DS_CMB, drvdata->enable_ds))
+	if (test_bit(TPDM_DS_CMB, drvdata->enable_ds) ||
+		test_bit(TPDM_DS_MCMB, drvdata->enable_ds))
 		__tpdm_disable_cmb(drvdata);
 
 	if (drvdata->clk_enable)
@@ -3192,9 +3183,10 @@
 	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
 		return -EPERM;
 
-	return scnprintf(buf, PAGE_SIZE, "%s\n",
-			 drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS ?
-			 "continuous" : "trace_on_change");
+	return scnprintf(buf, PAGE_SIZE, "trace_mode: %s cycle_acc: %d\n",
+			 drvdata->cmb->trace_mode ?
+			 "trace_on_change" : "continuous",
+			 drvdata->cmb->cycle_acc);
 }
 
 static ssize_t tpdm_store_cmb_mode(struct device *dev,
@@ -3203,180 +3195,118 @@
 				   size_t size)
 {
 	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	char str[20] = "";
+	unsigned int trace_mode, cycle_acc;
+	int nval;
 
-	if (strlen(buf) >= 20)
+	nval = sscanf(buf, "%u %u", &trace_mode, &cycle_acc);
+	if (nval != 2)
 		return -EINVAL;
-	if (sscanf(buf, "%s", str) != 1)
-		return -EINVAL;
+
 	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
 	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
 		return -EPERM;
 
 	mutex_lock(&drvdata->lock);
-	if (!strcmp(str, "continuous")) {
-		drvdata->cmb->mode = TPDM_CMB_MODE_CONTINUOUS;
-	} else if (!strcmp(str, "trace_on_change")) {
-		drvdata->cmb->mode = TPDM_CMB_MODE_TRACE_ON_CHANGE;
-	} else {
-		mutex_unlock(&drvdata->lock);
-		return -EINVAL;
-	}
+	drvdata->cmb->trace_mode = trace_mode;
+	drvdata->cmb->cycle_acc = cycle_acc;
 	mutex_unlock(&drvdata->lock);
 	return size;
 }
 static DEVICE_ATTR(cmb_mode, 0644,
 		   tpdm_show_cmb_mode, tpdm_store_cmb_mode);
 
-static ssize_t tpdm_show_cmb_patt_val_lsb(struct device *dev,
+static ssize_t tpdm_show_cmb_patt_val(struct device *dev,
 					  struct device_attribute *attr,
 					  char *buf)
 {
 	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	unsigned long val;
+	ssize_t size = 0;
+	int i;
 
 	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
 	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
 		return -EPERM;
 
-	val = drvdata->cmb->patt_val[TPDM_CMB_LSB];
-
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_CMB_PATT_CMP; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->cmb->patt_val[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
 }
 
-static ssize_t tpdm_store_cmb_patt_val_lsb(struct device *dev,
+static ssize_t tpdm_store_cmb_patt_val(struct device *dev,
 					   struct device_attribute *attr,
 					   const char *buf, size_t size)
 {
 	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	unsigned long val;
+	unsigned long index, val;
 
-	if (kstrtoul(buf, 16, &val))
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (index >= TPDM_CMB_PATT_CMP)
 		return -EINVAL;
 	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
 	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
 		return -EPERM;
 
 	mutex_lock(&drvdata->lock);
-	drvdata->cmb->patt_val[TPDM_CMB_LSB] = val;
+	drvdata->cmb->patt_val[index] = val;
 	mutex_unlock(&drvdata->lock);
+
 	return size;
 }
-static DEVICE_ATTR(cmb_patt_val_lsb, 0644,
-		   tpdm_show_cmb_patt_val_lsb,
-		   tpdm_store_cmb_patt_val_lsb);
+static DEVICE_ATTR(cmb_patt_val, 0644,
+		   tpdm_show_cmb_patt_val,
+		   tpdm_store_cmb_patt_val);
 
-static ssize_t tpdm_show_cmb_patt_mask_lsb(struct device *dev,
+static ssize_t tpdm_show_cmb_patt_mask(struct device *dev,
 					   struct device_attribute *attr,
 					   char *buf)
 {
 	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	unsigned long val;
+	ssize_t size = 0;
+	int i;
 
 	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
 	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
 		return -EPERM;
 
-	val = drvdata->cmb->patt_mask[TPDM_CMB_LSB];
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_CMB_PATT_CMP; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->cmb->patt_mask[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
 
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 
-static ssize_t tpdm_store_cmb_patt_mask_lsb(struct device *dev,
+static ssize_t tpdm_store_cmb_patt_mask(struct device *dev,
 					    struct device_attribute *attr,
 					    const char *buf, size_t size)
 {
 	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	unsigned long val;
+	unsigned long index, val;
 
-	if (kstrtoul(buf, 16, &val))
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (index >= TPDM_CMB_PATT_CMP)
 		return -EINVAL;
 	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
 	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
 		return -EPERM;
 
 	mutex_lock(&drvdata->lock);
-	drvdata->cmb->patt_mask[TPDM_CMB_LSB] = val;
+	drvdata->cmb->patt_mask[index] = val;
 	mutex_unlock(&drvdata->lock);
 	return size;
 }
-static DEVICE_ATTR(cmb_patt_mask_lsb, 0644,
-		   tpdm_show_cmb_patt_mask_lsb, tpdm_store_cmb_patt_mask_lsb);
-
-static ssize_t tpdm_show_cmb_patt_val_msb(struct device *dev,
-					  struct device_attribute *attr,
-					  char *buf)
-{
-	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	unsigned long val;
-
-	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
-	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
-		return -EPERM;
-
-	val = drvdata->cmb->patt_val[TPDM_CMB_MSB];
-
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t tpdm_store_cmb_patt_val_msb(struct device *dev,
-					   struct device_attribute *attr,
-					   const char *buf, size_t size)
-{
-	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	unsigned long val;
-
-	if (kstrtoul(buf, 16, &val))
-		return -EINVAL;
-	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
-	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
-		return -EPERM;
-
-	mutex_lock(&drvdata->lock);
-	drvdata->cmb->patt_val[TPDM_CMB_MSB] = val;
-	mutex_unlock(&drvdata->lock);
-	return size;
-}
-static DEVICE_ATTR(cmb_patt_val_msb, 0644,
-		   tpdm_show_cmb_patt_val_msb,
-		   tpdm_store_cmb_patt_val_msb);
-
-static ssize_t tpdm_show_cmb_patt_mask_msb(struct device *dev,
-					   struct device_attribute *attr,
-					   char *buf)
-{
-	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	unsigned long val;
-
-	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
-	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
-		return -EPERM;
-
-	val = drvdata->cmb->patt_mask[TPDM_CMB_MSB];
-
-	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t tpdm_store_cmb_patt_mask_msb(struct device *dev,
-					    struct device_attribute *attr,
-					    const char *buf, size_t size)
-{
-	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-	unsigned long val;
-
-	if (kstrtoul(buf, 16, &val))
-		return -EINVAL;
-	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
-	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
-		return -EPERM;
-
-	mutex_lock(&drvdata->lock);
-	drvdata->cmb->patt_mask[TPDM_CMB_MSB] = val;
-	mutex_unlock(&drvdata->lock);
-	return size;
-}
-static DEVICE_ATTR(cmb_patt_mask_msb, 0644,
-		   tpdm_show_cmb_patt_mask_msb, tpdm_store_cmb_patt_mask_msb);
+static DEVICE_ATTR(cmb_patt_mask, 0644,
+		   tpdm_show_cmb_patt_mask, tpdm_store_cmb_patt_mask);
 
 static ssize_t tpdm_show_cmb_patt_ts(struct device *dev,
 				     struct device_attribute *attr,
@@ -3896,10 +3826,8 @@
 static struct attribute *tpdm_cmb_attrs[] = {
 	&dev_attr_cmb_available_modes.attr,
 	&dev_attr_cmb_mode.attr,
-	&dev_attr_cmb_patt_val_lsb.attr,
-	&dev_attr_cmb_patt_mask_lsb.attr,
-	&dev_attr_cmb_patt_val_msb.attr,
-	&dev_attr_cmb_patt_mask_msb.attr,
+	&dev_attr_cmb_patt_val.attr,
+	&dev_attr_cmb_patt_mask.attr,
 	&dev_attr_cmb_patt_ts.attr,
 	&dev_attr_cmb_trig_patt_val_lsb.attr,
 	&dev_attr_cmb_trig_patt_mask_lsb.attr,
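
For reference, the user-visible side of the reworked CMB sysfs nodes: cmb_mode now takes two decimal values ("<trace_mode> <cycle_acc>") instead of a mode string, and the consolidated cmb_patt_val/cmb_patt_mask nodes take "<index> <value>" in hex, matching the sscanf() formats above. A hypothetical user-space sketch; the device path is a placeholder:

	#include <stdio.h>

	int main(void)
	{
		/* placeholder path; the real nodes live under the TPDM's
		 * coresight device directory
		 */
		const char *base = "/sys/bus/coresight/devices/TPDM0";
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "%s/cmb_mode", base);
		f = fopen(path, "w");
		if (f) {
			fprintf(f, "1 2\n");	/* trace_on_change, cycle acc = 2 */
			fclose(f);
		}

		snprintf(path, sizeof(path), "%s/cmb_patt_val", base);
		f = fopen(path, "w");
		if (f) {
			fprintf(f, "0 deadbeef\n");	/* comparator 0, hex value */
			fclose(f);
		}

		return 0;
	}
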
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index bbe1524..f397a5b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -201,6 +201,7 @@
 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+	{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
 	{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
 	{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
 	{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -329,6 +330,7 @@
 	XPAD_XBOX360_VENDOR(0x24c6),		/* PowerA Controllers */
 	XPAD_XBOXONE_VENDOR(0x24c6),		/* PowerA Controllers */
 	XPAD_XBOX360_VENDOR(0x1532),		/* Razer Sabertooth */
+	XPAD_XBOXONE_VENDOR(0x1532),		/* Razer Wildcat */
 	XPAD_XBOX360_VENDOR(0x15e4),		/* Numark X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x162e),		/* Joytech X-Box 360 controllers */
 	{ }
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9..2d203b4 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@
 		return -ENOMEM;
 	}
 
+	raw_spin_lock_init(&cd->rlock);
+
 	cd->gpc_base = of_iomap(node, 0);
 	if (!cd->gpc_base) {
 		pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index 0a802fd..a9ddf0f 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -1115,8 +1115,7 @@
 	if (irq < 0)
 		return irq;
 
-	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
-			tcs_irq_handler,
+	ret = devm_request_irq(&pdev->dev, irq, tcs_irq_handler,
 			IRQF_ONESHOT | IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
 			"tcs_irq", drv);
 	if (ret)
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 709f1d8..0fa3262 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1799,6 +1799,34 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_frame_size);
 		break;
 	}
+	case HAL_PARAM_VIDEO_CORES_USAGE:
+	{
+		struct hal_videocores_usage_info *hal = pdata;
+		struct hfi_videocores_usage_type *core_info =
+			(struct hfi_videocores_usage_type *)
+			&pkt->rg_property_data[1];
+
+		core_info->video_core_enable_mask = hal->video_core_enable_mask;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
+		pkt->size += sizeof(u32) + sizeof(*core_info);
+		break;
+	}
+	case HAL_PARAM_VIDEO_WORK_MODE:
+	{
+		struct hal_video_work_mode *hal = pdata;
+		struct hfi_video_work_mode *work_mode =
+			(struct hfi_video_work_mode *)
+			&pkt->rg_property_data[1];
+
+		work_mode->video_work_mode = hal->video_work_mode;
+
+		pkt->rg_property_data[0] =
+			HFI_PROPERTY_PARAM_WORK_MODE;
+		pkt->size += sizeof(u32) + sizeof(*work_mode);
+		break;
+	}
 	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
 	case HAL_CONFIG_BUFFER_REQUIREMENTS:
 	case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 32611e47..c102687 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1095,49 +1095,6 @@
 	return rc;
 }
 
-static inline int msm_venc_power_save_mode_enable(struct msm_vidc_inst *inst)
-{
-	u32 rc = 0;
-	u32 prop_id = 0, power_save_min = 0, power_save_max = 0, inst_load = 0;
-	void *pdata = NULL;
-	struct hfi_device *hdev = NULL;
-	enum hal_perf_mode venc_mode;
-	enum load_calc_quirks quirks = LOAD_CALC_IGNORE_TURBO_LOAD |
-		LOAD_CALC_IGNORE_THUMBNAIL_LOAD;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	inst_load = msm_comm_get_inst_load(inst, quirks);
-	power_save_min = inst->capability.mbs_per_sec_power_save.min;
-	power_save_max = inst->capability.mbs_per_sec_power_save.max;
-
-	if (!power_save_min || !power_save_max)
-		return rc;
-
-	hdev = inst->core->device;
-	if (inst_load >= power_save_min && inst_load <= power_save_max) {
-		prop_id = HAL_CONFIG_VENC_PERF_MODE;
-		venc_mode = HAL_PERF_MODE_POWER_SAVE;
-		pdata = &venc_mode;
-		rc = call_hfi_op(hdev, session_set_property,
-				(void *)inst->session, prop_id, pdata);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: Failed to set power save mode for inst: %pK\n",
-				__func__, inst);
-			goto fail_power_mode_set;
-		}
-		inst->flags |= VIDC_LOW_POWER;
-		dprintk(VIDC_INFO, "Power Save Mode set for inst: %pK\n", inst);
-	}
-
-fail_power_mode_set:
-	return rc;
-}
-
 static struct v4l2_ctrl *get_ctrl_from_cluster(int id,
 		struct v4l2_ctrl **cluster, int ncontrols)
 {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index d96963c..e071037 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1290,14 +1290,6 @@
 	return rc;
 }
 
-static inline int msm_vidc_decide_core_and_power_mode(
-	struct msm_vidc_inst *inst)
-{
-	dprintk(VIDC_DBG,
-		"Core selection is not yet implemented for inst = %pK\n",
-			inst);
-	return 0;
-}
 static inline int msm_vidc_verify_buffer_counts(struct msm_vidc_inst *inst)
 {
 	int rc = 0, i = 0;
@@ -1358,15 +1350,22 @@
 		goto fail_start;
 	}
 
+	/* Decide work mode for current session */
+	rc = msm_vidc_decide_work_mode(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to decide work mode for session %pK\n", inst);
+		goto fail_start;
+	}
+
 	/* Assign Core and LP mode for current session */
 	rc = msm_vidc_decide_core_and_power_mode(inst);
 	if (rc) {
 		dprintk(VIDC_ERR,
-			"This session can't be submitted to HW%pK\n", inst);
+			"This session can't be submitted to HW %pK\n", inst);
 		goto fail_start;
 	}
 
-
 	if (msm_comm_get_stream_output_mode(inst) ==
 			HAL_VIDEO_DECODER_SECONDARY) {
 		b.buffer_type = HAL_BUFFER_OUTPUT2;
@@ -1381,7 +1380,7 @@
 
 	rc = msm_comm_try_get_bufreqs(inst);
 
-	/* Check if current session is under HW capability */
+	/* Verify if buffer counts are correct */
 	rc = msm_vidc_verify_buffer_counts(inst);
 	if (rc) {
 		dprintk(VIDC_ERR,
@@ -1460,7 +1459,6 @@
 	return rc;
 }
 
-
 static int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count)
 {
 	struct msm_vidc_inst *inst;
@@ -1974,6 +1972,7 @@
 	inst->state = MSM_VIDC_CORE_UNINIT_DONE;
 	inst->core = core;
 	inst->freq = 0;
+	inst->core_id = VIDC_CORE_ID_DEFAULT;
 	inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
 	inst->bitrate = 0;
 	inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 1c78a45..d43ae5a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -589,4 +589,288 @@
 		inst->dcvs.extra_capture_buffer_count;
 }
 
+int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	bool low_latency_mode;
+	struct hfi_device *hdev;
+	struct hal_video_work_mode pdata;
+	struct hal_enable latency;
+
+	hdev = inst->core->device;
+
+	low_latency_mode = msm_comm_g_ctrl_for_id(inst,
+			V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE);
+	if (low_latency_mode) {
+		pdata.video_work_mode = VIDC_WORK_MODE_1;
+		goto decision_done;
+	}
+
+	if (inst->session_type == MSM_VIDC_DECODER) {
+		pdata.video_work_mode = VIDC_WORK_MODE_2;
+		switch (inst->fmts[OUTPUT_PORT].fourcc) {
+		case V4L2_PIX_FMT_MPEG2:
+			pdata.video_work_mode = VIDC_WORK_MODE_1;
+			break;
+		case V4L2_PIX_FMT_H264:
+		case V4L2_PIX_FMT_HEVC:
+			if (inst->prop.height[OUTPUT_PORT] *
+				inst->prop.width[OUTPUT_PORT] <=
+					1280 * 720)
+				pdata.video_work_mode = VIDC_WORK_MODE_1;
+			break;
+		}
+	} else if (inst->session_type == MSM_VIDC_ENCODER) {
+		u32 rc_mode = 0;
+
+		pdata.video_work_mode = VIDC_WORK_MODE_1;
+		rc_mode =  msm_comm_g_ctrl_for_id(inst,
+				V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL);
+		if (rc_mode == V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR ||
+			rc_mode ==
+				V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR)
+			pdata.video_work_mode = VIDC_WORK_MODE_2;
+	} else {
+		return -EINVAL;
+	}
+
+decision_done:
+
+	inst->work_mode = pdata.video_work_mode;
+	rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session, HAL_PARAM_VIDEO_WORK_MODE,
+			(void *)&pdata);
+	if (rc)
+		dprintk(VIDC_WARN,
+				"Failed to configure Work Mode %pK\n", inst);
+
+	/* For WORK_MODE_1, set Low Latency mode by default to HW. */
+
+	if (inst->work_mode == VIDC_WORK_MODE_1) {
+		latency.enable = 1;
+		rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session, HAL_PARAM_VENC_LOW_LATENCY,
+			(void *)&latency);
+	}
+
+	rc = msm_comm_scale_clocks_and_bus(inst);
+
+	return rc;
+}
+
+static inline int msm_vidc_power_save_mode_enable(struct msm_vidc_inst *inst,
+	bool enable)
+{
+	u32 rc = 0, mbs_per_frame;
+	u32 prop_id = 0;
+	void *pdata = NULL;
+	struct hfi_device *hdev = NULL;
+	enum hal_perf_mode venc_mode;
+
+	hdev = inst->core->device;
+	if (inst->session_type != MSM_VIDC_ENCODER) {
+		dprintk(VIDC_DBG,
+			"%s : Not an encoder session. Nothing to do\n",
+				__func__);
+		return 0;
+	}
+	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+	if (inst->core->resources.max_hq_mbs_per_frame > mbs_per_frame ||
+		inst->core->resources.max_hq_fps > inst->prop.fps) {
+		enable = true;
+	}
+
+	prop_id = HAL_CONFIG_VENC_PERF_MODE;
+	venc_mode = enable ? HAL_PERF_MODE_POWER_SAVE :
+		HAL_PERF_MODE_POWER_MAX_QUALITY;
+	pdata = &venc_mode;
+	rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session, prop_id, pdata);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"%s: Failed to set power save mode for inst: %pK\n",
+			__func__, inst);
+		goto fail_power_mode_set;
+	}
+	inst->flags |= VIDC_LOW_POWER;
+	dprintk(VIDC_PROF, "Power Save Mode set for inst: %pK\n", inst);
+
+fail_power_mode_set:
+	return rc;
+}
+
+static int msm_vidc_move_core_to_power_save_mode(struct msm_vidc_core *core,
+	u32 core_id)
+{
+	struct msm_vidc_inst *inst = NULL;
+
+	if (!core) {
+		dprintk(VIDC_ERR, "Invalid args: %pK\n", core);
+		return -EINVAL;
+	}
+
+	dprintk(VIDC_PROF, "Core %d : Moving all inst to LP mode\n", core_id);
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		if (inst->core_id == core_id &&
+			inst->session_type == MSM_VIDC_ENCODER)
+			msm_vidc_power_save_mode_enable(inst, true);
+	}
+	mutex_unlock(&core->lock);
+
+	return 0;
+}
+
+static u32 get_core_load(struct msm_vidc_core *core,
+	u32 core_id, bool lp_mode)
+{
+	struct msm_vidc_inst *inst = NULL;
+	u32 current_inst_mbs_per_sec = 0, load = 0;
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list) {
+		u32 cycles, lp_cycles;
+
+		if (!inst->core_id && core_id)
+			continue;
+		if (inst->session_type == MSM_VIDC_DECODER) {
+			cycles = lp_cycles = inst->entry->vpp_cycles;
+		} else if (inst->session_type == MSM_VIDC_ENCODER) {
+			lp_mode |= inst->flags & VIDC_LOW_POWER;
+			cycles = lp_mode ?
+				inst->entry->low_power_cycles :
+				inst->entry->vpp_cycles;
+		} else {
+			continue;
+		}
+		if (inst->core_id == 3)
+			cycles = cycles / 2;
+
+		current_inst_mbs_per_sec = msm_comm_get_inst_load(inst,
+				LOAD_CALC_NO_QUIRKS);
+		load += current_inst_mbs_per_sec * cycles;
+	}
+	mutex_unlock(&core->lock);
+
+	return load;
+}
+
+int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
+{
+	int rc = 0, hier_mode = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_core *core;
+	unsigned long max_freq, lp_cycles = 0;
+	struct hal_videocores_usage_info core_info;
+	u32 core0_load = 0, core1_load = 0, core0_lp_load = 0,
+		core1_lp_load = 0;
+	u32 current_inst_load = 0, current_inst_lp_load = 0,
+		min_load = 0, min_lp_load = 0;
+	u32 min_core_id, min_lp_core_id;
+
+	core = inst->core;
+	hdev = core->device;
+	max_freq = msm_vidc_max_freq(inst);
+	inst->core_id = 0;
+
+	core0_load = get_core_load(core, VIDC_CORE_ID_1, false);
+	core1_load = get_core_load(core, VIDC_CORE_ID_2, false);
+	core0_lp_load = get_core_load(core, VIDC_CORE_ID_1, true);
+	core1_lp_load = get_core_load(core, VIDC_CORE_ID_2, true);
+
+	min_load = min(core0_load, core1_load);
+	min_core_id = core0_load < core1_load ?
+		VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
+	min_lp_load = min(core0_lp_load, core1_lp_load);
+	min_lp_core_id = core0_lp_load < core1_lp_load ?
+		VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
+
+	lp_cycles = inst->session_type == MSM_VIDC_ENCODER ?
+			inst->entry->low_power_cycles :
+			inst->entry->vpp_cycles;
+
+	current_inst_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
+		inst->entry->vpp_cycles;
+
+	current_inst_lp_load = msm_comm_get_inst_load(inst,
+		LOAD_CALC_NO_QUIRKS) * lp_cycles;
+
+	dprintk(VIDC_DBG, "Core 0 Load = %d Core 1 Load = %d\n",
+		 core0_load, core1_load);
+	dprintk(VIDC_DBG, "Core 0 LP Load = %d Core 1 LP Load = %d\n",
+		core0_lp_load, core1_lp_load);
+	dprintk(VIDC_DBG, "Max Load = %lu\n", max_freq);
+	dprintk(VIDC_DBG, "Current Load = %d Current LP Load = %d\n",
+		current_inst_load, current_inst_lp_load);
+
+	/* Hier mode can be normal HP or Hybrid HP. */
+
+	hier_mode = msm_comm_g_ctrl_for_id(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS);
+	hier_mode |= msm_comm_g_ctrl_for_id(inst,
+		V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE);
+
+	/* Try for preferred core based on settings. */
+	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode) {
+		if (current_inst_load / 2 + core0_load <= max_freq &&
+			current_inst_load / 2 + core1_load <= max_freq) {
+			inst->core_id = VIDC_CORE_ID_3;
+			msm_vidc_power_save_mode_enable(inst, false);
+			goto decision_done;
+		}
+	}
+
+	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode) {
+		if (current_inst_lp_load / 2 +
+				core0_lp_load <= max_freq &&
+			current_inst_lp_load / 2 +
+				core1_lp_load <= max_freq) {
+			inst->core_id = VIDC_CORE_ID_3;
+			msm_vidc_power_save_mode_enable(inst, true);
+			goto decision_done;
+		}
+	}
+
+	if (current_inst_load + min_load < max_freq) {
+		inst->core_id = min_core_id;
+		dprintk(VIDC_DBG,
+			"Selected normally : Core ID = %d\n",
+				inst->core_id);
+		msm_vidc_power_save_mode_enable(inst, false);
+	} else if (current_inst_lp_load + min_load < max_freq) {
+		/* Move current instance to LP and return */
+		inst->core_id = min_core_id;
+		dprintk(VIDC_DBG,
+			"Selected by moving current to LP : Core ID = %d\n",
+				inst->core_id);
+		msm_vidc_power_save_mode_enable(inst, true);
+
+	} else if (current_inst_lp_load + min_lp_load < max_freq) {
+		/* Move all instances to LP mode and return */
+		inst->core_id = min_lp_core_id;
+		dprintk(VIDC_DBG,
+			"Moved all instances to LP: Core ID = %d\n",
+				inst->core_id);
+		msm_vidc_move_core_to_power_save_mode(core, min_lp_core_id);
+	} else {
+		rc = -EINVAL;
+		dprintk(VIDC_ERR,
+			"Core can't support this load\n");
+		return rc;
+	}
+
+decision_done:
+	core_info.video_core_enable_mask = inst->core_id;
+
+	rc = call_hfi_op(hdev, session_set_property,
+			(void *)inst->session,
+			HAL_PARAM_VIDEO_CORES_USAGE, &core_info);
+	if (rc)
+		dprintk(VIDC_WARN,
+				"Failed to configure CORE ID %pK\n", inst);
+
+	rc = msm_comm_scale_clocks_and_bus(inst);
+
+	return rc;
+}
 
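Note on the core/power-mode selection added above: ignoring the dual-core (hier-P) fast path, the fallback chain is a plain three-step fit test against a per-core cycle budget. A rough standalone sketch of that chain follows; it is illustrative only, with hypothetical names and numbers rather than the driver's types.

	/*
	 * Simplified model of the fallback order used by
	 * msm_vidc_decide_core_and_power_mode(); not kernel code.
	 */
	#include <stdio.h>

	enum core_sel { SEL_REJECT, SEL_MIN_CORE, SEL_MIN_CORE_LP, SEL_ALL_LP };

	/*
	 * new_load    - cycles the new session needs at full quality
	 * new_lp_load - cycles it needs in low-power (LP) mode
	 * min_load    - load of the least-loaded core today
	 * min_lp_load - least-loaded core if every session ran in LP
	 * budget      - cycles one core can deliver (the max_freq above)
	 */
	static enum core_sel pick_core(unsigned long new_load,
				       unsigned long new_lp_load,
				       unsigned long min_load,
				       unsigned long min_lp_load,
				       unsigned long budget)
	{
		if (new_load + min_load < budget)
			return SEL_MIN_CORE;	/* fits at full quality */
		if (new_lp_load + min_load < budget)
			return SEL_MIN_CORE_LP;	/* fits if only this session is LP */
		if (new_lp_load + min_lp_load < budget)
			return SEL_ALL_LP;	/* fits if every session moves to LP */
		return SEL_REJECT;		/* no core can take the load */
	}

	int main(void)
	{
		/* Hypothetical numbers, same units as the loads above. */
		printf("%d\n", pick_core(150, 90, 330, 240, 400));
		return 0;
	}
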
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 79fd8f6..d01f074 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -38,6 +38,8 @@
 int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
 int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst);
 void msm_comm_free_freq_table(struct msm_vidc_inst *inst);
+int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst);
+int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst);
 void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
 	ion_phys_addr_t device_addr);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 4b91193..53bc068 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -313,6 +313,8 @@
 	u32 level;
 	u32 entropy_mode;
 	struct clock_profile_entry *entry;
+	u32 core_id;
+	enum hal_work_mode work_mode;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 763c41d..0a6de41 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -940,6 +940,24 @@
 		goto err_load_max_hw_load;
 	}
 
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,max-hq-mbs-per-frame",
+			&res->max_hq_mbs_per_frame);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to determine Max HQ mbs per frame: %d\n", rc);
+		goto err_load_HQ_values;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,max-hq-frames-per-sec",
+			&res->max_hq_fps);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to determine Max HQ fps: %d\n", rc);
+		goto err_load_HQ_values;
+	}
+
 	rc = msm_vidc_populate_legacy_context_bank(res);
 	if (rc) {
 		dprintk(VIDC_ERR,
@@ -985,6 +1003,7 @@
 	return rc;
 
 err_setup_legacy_cb:
+err_load_HQ_values:
 err_load_max_hw_load:
 	msm_vidc_free_allowed_clocks_table(res);
 err_load_allowed_clocks_table:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 8fd43006..20b0ffc 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -165,6 +165,8 @@
 	uint32_t imem_size;
 	enum imem_type imem_type;
 	uint32_t max_load;
+	uint32_t max_hq_mbs_per_frame;
+	uint32_t max_hq_fps;
 	struct platform_device *pdev;
 	struct regulator_set regulator_set;
 	struct clock_set clock_set;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 28bb7ab..b0beeec 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -221,6 +221,8 @@
 	HAL_PARAM_VENC_H264_TRANSFORM_8x8,
 	HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
 	HAL_PARAM_VENC_IFRAMESIZE_TYPE,
+	HAL_PARAM_VIDEO_CORES_USAGE,
+	HAL_PARAM_VIDEO_WORK_MODE,
 };
 
 enum hal_domain {
@@ -819,6 +821,28 @@
 	u32 enable;
 };
 
+enum hal_core_id {
+	VIDC_CORE_ID_DEFAULT = 0,
+	VIDC_CORE_ID_1 = 1, /* 0b01 */
+	VIDC_CORE_ID_2 = 2, /* 0b10 */
+	VIDC_CORE_ID_3 = 3, /* 0b11 */
+	VIDC_CORE_ID_UNUSED = 0x10000000,
+};
+
+struct hal_videocores_usage_info {
+	u32 video_core_enable_mask;
+};
+
+enum hal_work_mode {
+	VIDC_WORK_MODE_1,
+	VIDC_WORK_MODE_2,
+	VIDC_WORK_MODE_UNUSED = 0x10000000,
+};
+
+struct hal_video_work_mode {
+	u32 video_work_mode;
+};
+
 struct hal_vpe_color_space_conversion {
 	u32 csc_matrix[HAL_MAX_MATRIX_COEFFS];
 	u32 csc_bias[HAL_MAX_BIAS_COEFFS];
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index bc7e8bd..04d7d01 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -163,6 +163,9 @@
 #define HFI_VENC_PERFMODE_MAX_QUALITY	0x1
 #define HFI_VENC_PERFMODE_POWER_SAVE	0x2
 
+#define  HFI_WORKMODE_1		(HFI_COMMON_BASE + 0x1)
+#define  HFI_WORKMODE_2		(HFI_COMMON_BASE + 0x2)
+
 struct hfi_buffer_info {
 	u32 buffer_addr;
 	u32 extra_data_addr;
@@ -215,11 +218,15 @@
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x00E)
 #define  HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED	    \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
+#define  HFI_PROPERTY_PARAM_WORK_MODE                       \
+	(HFI_PROPERTY_PARAM_COMMON_START + 0x015)
 
 #define HFI_PROPERTY_CONFIG_COMMON_START				\
 	(HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000)
 #define HFI_PROPERTY_CONFIG_FRAME_RATE					\
 	(HFI_PROPERTY_CONFIG_COMMON_START + 0x001)
+#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE				\
+	(HFI_PROPERTY_CONFIG_COMMON_START + 0x002)
 
 #define HFI_PROPERTY_PARAM_VDEC_COMMON_START				\
 	(HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000)
@@ -561,6 +568,14 @@
 	u32 height;
 };
 
+struct hfi_videocores_usage_type {
+	u32 video_core_enable_mask;
+};
+
+struct hfi_video_work_mode {
+	u32 video_work_mode;
+};
+
 struct hfi_video_signal_metadata {
 	u32 enable;
 	u32 video_format;
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index a8e6624..a9bb2dd 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -1013,8 +1013,8 @@
 void dvb_usbv2_disconnect(struct usb_interface *intf)
 {
 	struct dvb_usb_device *d = usb_get_intfdata(intf);
-	const char *name = d->name;
-	struct device dev = d->udev->dev;
+	const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
+	const char *drvname = d->name;
 
 	dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
 			intf->cur_altsetting->desc.bInterfaceNumber);
@@ -1024,8 +1024,9 @@
 
 	dvb_usbv2_exit(d);
 
-	dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
-			KBUILD_MODNAME, name);
+	pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
+		KBUILD_MODNAME, drvname, devname);
+	kfree(devname);
 }
 EXPORT_SYMBOL(dvb_usbv2_disconnect);
 
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 2434030..9fd43a3 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -59,23 +59,24 @@
 			  u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
 {
 	struct cxusb_state *st = d->priv;
-	int ret, wo;
+	int ret;
 
 	if (1 + wlen > MAX_XFER_SIZE) {
 		warn("i2c wr: len=%d is too big!\n", wlen);
 		return -EOPNOTSUPP;
 	}
 
-	wo = (rbuf == NULL || rlen == 0); /* write-only */
+	if (rlen > MAX_XFER_SIZE) {
+		warn("i2c rd: len=%d is too big!\n", rlen);
+		return -EOPNOTSUPP;
+	}
 
 	mutex_lock(&d->data_mutex);
 	st->data[0] = cmd;
 	memcpy(&st->data[1], wbuf, wlen);
-	if (wo)
-		ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
-	else
-		ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
-					 rbuf, rlen, 0);
+	ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, st->data, rlen, 0);
+	if (!ret && rbuf && rlen)
+		memcpy(rbuf, st->data, rlen);
 
 	mutex_unlock(&d->data_mutex);
 	return ret;
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index dd048a7..b8d2ac5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -35,42 +35,51 @@
 
 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
 {
-	struct hexline hx;
-	u8 reset;
-	int ret,pos=0;
+	struct hexline *hx;
+	u8 *buf;
+	int ret, pos = 0;
+	u16 cpu_cs_register = cypress[type].cpu_cs_register;
+
+	buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	hx = (struct hexline *)buf;
 
 	/* stop the CPU */
-	reset = 1;
-	if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+	buf[0] = 1;
+	if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
 		err("could not stop the USB controller CPU.");
 
-	while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
-		deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
-		ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+	while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
+		deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
+		ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
 
-		if (ret != hx.len) {
+		if (ret != hx->len) {
 			err("error while transferring firmware "
 				"(transferred size: %d, block size: %d)",
-				ret,hx.len);
+				ret, hx->len);
 			ret = -EINVAL;
 			break;
 		}
 	}
 	if (ret < 0) {
 		err("firmware download failed at %d with %d",pos,ret);
+		kfree(buf);
 		return ret;
 	}
 
 	if (ret == 0) {
 		/* restart the CPU */
-		reset = 0;
-		if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+		buf[0] = 0;
+		if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
 			err("could not restart the USB controller CPU.");
 			ret = -EINVAL;
 		}
 	} else
 		ret = -EIO;
 
+	kfree(buf);
+
 	return ret;
 }
 EXPORT_SYMBOL(usb_cypress_load_firmware);
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 33fc2b9..3bc7d4e 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -238,28 +238,28 @@
 	io_last->fsync -= task->ioac.syscfs;
 }
 
-static void update_io_stats_locked(void)
+static void update_io_stats_all_locked(void)
 {
 	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
 	struct io_stats *io_bucket, *io_curr, *io_last;
+	struct user_namespace *user_ns = current_user_ns();
 	unsigned long bkt;
-
-	BUG_ON(!rt_mutex_is_locked(&uid_lock));
+	uid_t uid;
 
 	hash_for_each(hash_table, bkt, uid_entry, hash)
 		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
 			sizeof(struct io_stats));
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	do_each_thread(temp, task) {
-		uid_entry = find_or_register_uid(from_kuid_munged(
-			current_user_ns(), task_uid(task)));
+		uid = from_kuid_munged(user_ns, task_uid(task));
+		uid_entry = find_or_register_uid(uid);
 		if (!uid_entry)
 			continue;
 		add_uid_io_curr_stats(uid_entry, task);
 	} while_each_thread(temp, task);
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	hash_for_each(hash_table, bkt, uid_entry, hash) {
 		io_bucket = &uid_entry->io[uid_entry->state];
@@ -282,6 +282,47 @@
 	}
 }
 
+static void update_io_stats_uid_locked(uid_t target_uid)
+{
+	struct uid_entry *uid_entry;
+	struct task_struct *task, *temp;
+	struct io_stats *io_bucket, *io_curr, *io_last;
+	struct user_namespace *user_ns = current_user_ns();
+
+	uid_entry = find_or_register_uid(target_uid);
+	if (!uid_entry)
+		return;
+
+	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+		sizeof(struct io_stats));
+
+	rcu_read_lock();
+	do_each_thread(temp, task) {
+		if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+			continue;
+		add_uid_io_curr_stats(uid_entry, task);
+	} while_each_thread(temp, task);
+	rcu_read_unlock();
+
+	io_bucket = &uid_entry->io[uid_entry->state];
+	io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+	io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+
+	io_bucket->read_bytes +=
+		io_curr->read_bytes - io_last->read_bytes;
+	io_bucket->write_bytes +=
+		io_curr->write_bytes - io_last->write_bytes;
+	io_bucket->rchar += io_curr->rchar - io_last->rchar;
+	io_bucket->wchar += io_curr->wchar - io_last->wchar;
+	io_bucket->fsync += io_curr->fsync - io_last->fsync;
+
+	io_last->read_bytes = io_curr->read_bytes;
+	io_last->write_bytes = io_curr->write_bytes;
+	io_last->rchar = io_curr->rchar;
+	io_last->wchar = io_curr->wchar;
+	io_last->fsync = io_curr->fsync;
+}
+
 static int uid_io_show(struct seq_file *m, void *v)
 {
 	struct uid_entry *uid_entry;
@@ -289,7 +330,7 @@
 
 	rt_mutex_lock(&uid_lock);
 
-	update_io_stats_locked();
+	update_io_stats_all_locked();
 
 	hash_for_each(hash_table, bkt, uid_entry, hash) {
 		seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
@@ -364,7 +405,7 @@
 		return count;
 	}
 
-	update_io_stats_locked();
+	update_io_stats_uid_locked(uid);
 
 	uid_entry->state = state;
 
@@ -402,7 +443,7 @@
 	uid_entry->utime += utime;
 	uid_entry->stime += stime;
 
-	update_io_stats_locked();
+	update_io_stats_uid_locked(uid);
 	clean_uid_io_last_stats(uid_entry, task);
 
 exit:
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 368bb07..481895b 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -557,7 +557,7 @@
 	int work_done = 0;
 
 	u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
-	u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+	u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
 	u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
 
 	/* Handle bus state changes */
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index e2512d5..eedf86b 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -528,6 +528,9 @@
 	if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
 		return 0;
 
+	if (!spec_priv->rfs_chan_spec_scan)
+		return 1;
+
 	/* Output buffers are full, no need to process anything
 	 * since there is no space to put the result anyway
 	 */
@@ -1072,7 +1075,7 @@
 
 void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
 {
-	if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
+	if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
 		relay_close(spec_priv->rfs_chan_spec_scan);
 		spec_priv->rfs_chan_spec_scan = NULL;
 	}
@@ -1086,6 +1089,9 @@
 					    debugfs_phy,
 					    1024, 256, &rfs_spec_scan_cb,
 					    NULL);
+	if (!spec_priv->rfs_chan_spec_scan)
+		return;
+
 	debugfs_create_file("spectral_scan_ctl",
 			    S_IRUSR | S_IWUSR,
 			    debugfs_phy, spec_priv,
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 23d4a17..351bac8 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -934,8 +934,14 @@
 	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
 	if (rc < 0)
 		goto out_unlock;
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+
 	if (copy_to_user(p, buf, buf_len))
 		rc = -EFAULT;
+
+	vfree(buf);
+	return rc;
+
  out_unlock:
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
  out:
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index d614493..dcb32f3 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -388,7 +388,7 @@
 
 int alias_dpa_busy(struct device *dev, void *data)
 {
-	resource_size_t map_end, blk_start, new, busy;
+	resource_size_t map_end, blk_start, new;
 	struct blk_alloc_info *info = data;
 	struct nd_mapping *nd_mapping;
 	struct nd_region *nd_region;
@@ -429,29 +429,19 @@
  retry:
 	/*
 	 * Find the free dpa from the end of the last pmem allocation to
-	 * the end of the interleave-set mapping that is not already
-	 * covered by a blk allocation.
+	 * the end of the interleave-set mapping.
 	 */
-	busy = 0;
 	for_each_dpa_resource(ndd, res) {
+		if (strncmp(res->name, "pmem", 4) != 0)
+			continue;
 		if ((res->start >= blk_start && res->start < map_end)
 				|| (res->end >= blk_start
 					&& res->end <= map_end)) {
-			if (strncmp(res->name, "pmem", 4) == 0) {
-				new = max(blk_start, min(map_end + 1,
-							res->end + 1));
-				if (new != blk_start) {
-					blk_start = new;
-					goto retry;
-				}
-			} else
-				busy += min(map_end, res->end)
-					- max(nd_mapping->start, res->start) + 1;
-		} else if (nd_mapping->start > res->start
-				&& map_end < res->end) {
-			/* total eclipse of the PMEM region mapping */
-			busy += nd_mapping->size;
-			break;
+			new = max(blk_start, min(map_end + 1, res->end + 1));
+			if (new != blk_start) {
+				blk_start = new;
+				goto retry;
+			}
 		}
 	}
 
@@ -463,52 +453,11 @@
 		return 1;
 	}
 
-	info->available -= blk_start - nd_mapping->start + busy;
+	info->available -= blk_start - nd_mapping->start;
 
 	return 0;
 }
 
-static int blk_dpa_busy(struct device *dev, void *data)
-{
-	struct blk_alloc_info *info = data;
-	struct nd_mapping *nd_mapping;
-	struct nd_region *nd_region;
-	resource_size_t map_end;
-	int i;
-
-	if (!is_nd_pmem(dev))
-		return 0;
-
-	nd_region = to_nd_region(dev);
-	for (i = 0; i < nd_region->ndr_mappings; i++) {
-		nd_mapping  = &nd_region->mapping[i];
-		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
-			break;
-	}
-
-	if (i >= nd_region->ndr_mappings)
-		return 0;
-
-	map_end = nd_mapping->start + nd_mapping->size - 1;
-	if (info->res->start >= nd_mapping->start
-			&& info->res->start < map_end) {
-		if (info->res->end <= map_end) {
-			info->busy = 0;
-			return 1;
-		} else {
-			info->busy -= info->res->end - map_end;
-			return 0;
-		}
-	} else if (info->res->end >= nd_mapping->start
-			&& info->res->end <= map_end) {
-		info->busy -= nd_mapping->start - info->res->start;
-		return 0;
-	} else {
-		info->busy -= nd_mapping->size;
-		return 0;
-	}
-}
-
 /**
  * nd_blk_available_dpa - account the unused dpa of BLK region
  * @nd_mapping: container of dpa-resource-root + labels
@@ -538,11 +487,7 @@
 	for_each_dpa_resource(ndd, res) {
 		if (strncmp(res->name, "blk", 3) != 0)
 			continue;
-
-		info.res = res;
-		info.busy = resource_size(res);
-		device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
-		info.available -= info.busy;
+		info.available -= resource_size(res);
 	}
 
 	return info.available;
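
The retry loop in alias_dpa_busy() above amounts to sliding blk_start forward past every overlapping pmem reservation until it no longer moves; whatever remains up to the end of the interleave-set mapping is what BLK can still use. A rough userspace sketch of that scan, with hypothetical data rather than the libnvdimm structures:

	#include <stdio.h>

	struct res { unsigned long long start, end; };

	/* Advance blk_start past any pmem reservation overlapping
	 * [blk_start, map_end]; repeat until nothing moves, mirroring
	 * the "goto retry" above. */
	static unsigned long long first_free(unsigned long long blk_start,
					     unsigned long long map_end,
					     const struct res *pmem, int n)
	{
		int moved;

		do {
			int i;

			moved = 0;
			for (i = 0; i < n; i++) {
				if (pmem[i].end < blk_start ||
				    pmem[i].start > map_end)
					continue;	/* no overlap */
				blk_start = pmem[i].end + 1;
				moved = 1;
			}
		} while (moved);

		return blk_start;
	}

	int main(void)
	{
		const struct res pmem[] = { { 0, 99 }, { 100, 149 } };

		/* Mapping spans [0, 255]; free BLK space starts at 150. */
		printf("%llu\n", first_free(0, 255, pmem, 2));
		return 0;
	}
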
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index b1d1dfa..f5e23c68 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -29,6 +29,7 @@
 
 static struct dentry *dent;
 static char dbg_buff[4096];
+static void *gsi_ipc_logbuf_low;
 
 static void gsi_wq_print_dp_stats(struct work_struct *work);
 static DECLARE_DELAYED_WORK(gsi_print_dp_stats_work, gsi_wq_print_dp_stats);
@@ -758,22 +759,20 @@
 	if (kstrtos8(dbg_buff, 0, &option))
 		return -EFAULT;
 
+	mutex_lock(&gsi_ctx->mlock);
 	if (option) {
-		if (!gsi_ctx->ipc_logbuf_low) {
-			gsi_ctx->ipc_logbuf_low =
+		if (!gsi_ipc_logbuf_low) {
+			gsi_ipc_logbuf_low =
 				ipc_log_context_create(GSI_IPC_LOG_PAGES,
 					"gsi_low", 0);
+			if (gsi_ipc_logbuf_low == NULL)
+				TERR("failed to get ipc_logbuf_low\n");
 		}
-
-		if (gsi_ctx->ipc_logbuf_low == NULL) {
-			TERR("failed to get ipc_logbuf_low\n");
-			return -EFAULT;
-		}
+		gsi_ctx->ipc_logbuf_low = gsi_ipc_logbuf_low;
 	} else {
-		if (gsi_ctx->ipc_logbuf_low)
-			ipc_log_context_destroy(gsi_ctx->ipc_logbuf_low);
 		gsi_ctx->ipc_logbuf_low = NULL;
 	}
+	mutex_unlock(&gsi_ctx->mlock);
 
 	return count;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 0af9387..4672233 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1541,6 +1541,9 @@
 			memcpy(mux_channel[rmnet_index].vchannel_name,
 				extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
 				sizeof(mux_channel[rmnet_index].vchannel_name));
+			mux_channel[rmnet_index].vchannel_name[
+				IFNAMSIZ - 1] = '\0';
+
 			IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
 				mux_channel[rmnet_index].vchannel_name,
 				mux_channel[rmnet_index].mux_id,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index cf99e64..83fd2b2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2397,11 +2397,11 @@
 }
 
 /**
- * _ipa_init_sram_v3_0() - Initialize IPA local SRAM.
+ * _ipa_init_sram_v3() - Initialize IPA local SRAM.
  *
  * Return codes: 0 for success, negative value for failure
  */
-int _ipa_init_sram_v3_0(void)
+int _ipa_init_sram_v3(void)
 {
 	u32 *ipa_sram_mmio;
 	unsigned long phys_addr;
@@ -2444,7 +2444,10 @@
 		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
 	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
 	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
-	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		(ipa_get_hw_type() >= IPA_HW_v3_5) ?
+			IPA_MEM_PART(uc_event_ring_ofst) :
+			IPA_MEM_PART(end_ofst));
 
 	iounmap(ipa_sram_mmio);
 
@@ -5528,13 +5531,6 @@
 		return result;
 	}
 
-	result = of_platform_populate(pdev_p->dev.of_node,
-		pdrv_match, NULL, &pdev_p->dev);
-	if (result) {
-		IPAERR("failed to populate platform\n");
-		return result;
-	}
-
 	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
 		if (of_property_read_bool(pdev_p->dev.of_node,
 		    "qcom,smmu-s1-bypass"))
@@ -5580,6 +5576,13 @@
 		}
 	}
 
+	result = of_platform_populate(pdev_p->dev.of_node,
+		pdrv_match, NULL, &pdev_p->dev);
+	if (result) {
+		IPAERR("failed to populate platform\n");
+		return result;
+	}
+
 	return result;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 18ee4ee..2ca33d8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1390,6 +1390,8 @@
 	u32 apps_v6_rt_hash_size;
 	u32 apps_v6_rt_nhash_ofst;
 	u32 apps_v6_rt_nhash_size;
+	u32 uc_event_ring_ofst;
+	u32 uc_event_ring_size;
 };
 
 struct ipa3_controller {
@@ -1844,7 +1846,7 @@
 int ipa3_teth_bridge_driver_init(void);
 void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
 
-int _ipa_init_sram_v3_0(void);
+int _ipa_init_sram_v3(void);
 int _ipa_init_hdr_v3_0(void);
 int _ipa_init_rt4_v3(void);
 int _ipa_init_rt6_v3(void);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 4208a49..5a38db3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2909,17 +2909,54 @@
  */
 int ipa3_init_mem_partition(struct device_node *node)
 {
+	const size_t ram_mmap_v3_0_size = 70;
+	const size_t ram_mmap_v3_5_size = 72;
+	const size_t ram_mmap_current_version_size =
+		sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32);
+	const size_t version = ipa_get_hw_type();
 	int result;
 
 	IPADBG("Reading from DTS as u32 array\n");
-	result = of_property_read_u32_array(node,
-		"qcom,ipa-ram-mmap", (u32 *)&ipa3_ctx->ctrl->mem_partition,
-		sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32));
 
-	if (result) {
+	/*
+	 * The size of ipa-ram-mmap array depends on the IPA version. The
+	 * actual size can't be assumed because of possible DTS versions
+	 * mismatch. The size of the array monotonically increasing because the
+	 * obsolete entries are set to zero rather than deleted, so the
+	 * possible sizes are in range
+	 *	[ram_mmap_v3_0_size, ram_mmap_current_version_size]
+	 */
+	result = of_property_read_variable_u32_array(node, "qcom,ipa-ram-mmap",
+		(u32 *)&ipa3_ctx->ctrl->mem_partition,
+		ram_mmap_v3_0_size, ram_mmap_current_version_size);
+
+	if (result <= 0) {
 		IPAERR("Read operation failed\n");
 		return -ENODEV;
 	}
+	if (version < IPA_HW_v3_0)
+		ipa_assert();
+	if (version < IPA_HW_v3_5) {
+		if (result != ram_mmap_v3_0_size) {
+			IPAERR("Mismatch at IPA RAM MMAP DTS entry\n");
+			return -ENODEV;
+		}
+	} else {
+		if (result != ram_mmap_v3_5_size) {
+			IPAERR("Mismatch at IPA RAM MMAP DTS entry\n");
+			return -ENODEV;
+		}
+
+		if (IPA_MEM_PART(uc_event_ring_ofst) & 1023) {
+			IPAERR("UC EVENT RING OFST 0x%x is unaligned\n",
+				IPA_MEM_PART(uc_event_ring_ofst));
+			return -ENODEV;
+		}
+
+		IPADBG("UC EVENT RING OFST 0x%x SIZE 0x%x\n",
+			IPA_MEM_PART(uc_event_ring_ofst),
+			IPA_MEM_PART(uc_event_ring_size));
+	}
 
 	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
 		IPA_MEM_PART(nat_size));
@@ -3167,7 +3204,7 @@
 	ctrl->clock_scaling_bw_threshold_turbo =
 		IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
 	ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
-	ctrl->ipa_init_sram = _ipa_init_sram_v3_0;
+	ctrl->ipa_init_sram = _ipa_init_sram_v3;
 	ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
 
 	ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
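
The DTS read above relies on of_property_read_variable_u32_array() accepting any length between the v3.0 layout (70 cells) and the current layout, and returning the number of cells actually read; the caller then pins that count to the one valid size for the detected hardware. A minimal model of that validation step, in plain C with the cell counts taken from the change above:

	#include <stdio.h>
	#include <stdbool.h>

	#define RAM_MMAP_V3_0_CELLS	70	/* layout used before IPA v3.5 */
	#define RAM_MMAP_V3_5_CELLS	72	/* adds the uC event ring entries */

	/* cells_read is what the DT helper returned: the number of u32
	 * cells it copied, or a negative error. */
	static bool ram_mmap_len_ok(int cells_read, bool hw_is_v3_5_or_later)
	{
		if (cells_read <= 0)
			return false;		/* property missing or malformed */
		if (hw_is_v3_5_or_later)
			return cells_read == RAM_MMAP_V3_5_CELLS;
		return cells_read == RAM_MMAP_V3_0_CELLS;
	}

	int main(void)
	{
		printf("%d\n", ram_mmap_len_ok(72, true));	/* 1: accepted */
		printf("%d\n", ram_mmap_len_ok(70, true));	/* 0: stale DT */
		printf("%d\n", ram_mmap_len_ok(70, false));	/* 1: accepted */
		return 0;
	}
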
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index cb25b09..56e7718 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1655,6 +1655,9 @@
 				extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
 				sizeof(mux_channel[rmnet_index]
 					.vchannel_name));
+			mux_channel[rmnet_index].vchannel_name[
+				IFNAMSIZ - 1] = '\0';
+
 			IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
 				mux_channel[rmnet_index].vchannel_name,
 				mux_channel[rmnet_index].mux_id,
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index a66192f..c29b9b6 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1846,11 +1846,24 @@
 	return status;
 }
 
+#define ACER_WMID_ACCEL_HID	"BST0001"
+
 static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
 						void *ctx, void **retval)
 {
+	struct acpi_device *dev;
+
+	if (!strcmp(ctx, "SENR")) {
+		if (acpi_bus_get_device(ah, &dev))
+			return AE_OK;
+		if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
+			return AE_OK;
+	} else
+		return AE_OK;
+
 	*(acpi_handle *)retval = ah;
-	return AE_OK;
+
+	return AE_CTRL_TERMINATE;
 }
 
 static int __init acer_wmi_get_handle(const char *name, const char *prop,
@@ -1877,7 +1890,7 @@
 {
 	int err;
 
-	err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
+	err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
 	if (err)
 		return err;
 
@@ -2233,10 +2246,11 @@
 		err = acer_wmi_input_setup();
 		if (err)
 			return err;
+		err = acer_wmi_accel_setup();
+		if (err)
+			return err;
 	}
 
-	acer_wmi_accel_setup();
-
 	err = platform_driver_register(&acer_platform_driver);
 	if (err) {
 		pr_err("Unable to register platform driver\n");
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index ef89df1..744d561 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -191,6 +191,28 @@
 	return 0;
 }
 
+static int rockchip_pwm_enable(struct pwm_chip *chip,
+			 struct pwm_device *pwm,
+			 bool enable,
+			 enum pwm_polarity polarity)
+{
+	struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+	int ret;
+
+	if (enable) {
+		ret = clk_enable(pc->clk);
+		if (ret)
+			return ret;
+	}
+
+	pc->data->set_enable(chip, pwm, enable, polarity);
+
+	if (!enable)
+		clk_disable(pc->clk);
+
+	return 0;
+}
+
 static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 			      struct pwm_state *state)
 {
@@ -207,22 +229,26 @@
 		return ret;
 
 	if (state->polarity != curstate.polarity && enabled) {
-		pc->data->set_enable(chip, pwm, false, state->polarity);
+		ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
+		if (ret)
+			goto out;
 		enabled = false;
 	}
 
 	ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
 	if (ret) {
 		if (enabled != curstate.enabled)
-			pc->data->set_enable(chip, pwm, !enabled,
-					     state->polarity);
-
+			rockchip_pwm_enable(chip, pwm, !enabled,
+				      state->polarity);
 		goto out;
 	}
 
-	if (state->enabled != enabled)
-		pc->data->set_enable(chip, pwm, state->enabled,
-				     state->polarity);
+	if (state->enabled != enabled) {
+		ret = rockchip_pwm_enable(chip, pwm, state->enabled,
+				    state->polarity);
+		if (ret)
+			goto out;
+	}
 
 	/*
 	 * Update the state with the real hardware, which can differ a bit
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 3853ba9..19e03d0 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -18,6 +18,7 @@
  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
 #include <linux/kernel.h>
+#include <linux/clk.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -59,6 +60,7 @@
 	struct platform_device	*pdev;
 	struct rtc_device	*rtc_dev;
 	void __iomem		*rtc_base; /* NULL if not initialized. */
+	struct clk		*clk;
 	int			tegra_rtc_irq; /* alarm and periodic irq */
 	spinlock_t		tegra_rtc_lock;
 };
@@ -326,6 +328,14 @@
 	if (info->tegra_rtc_irq <= 0)
 		return -EBUSY;
 
+	info->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(info->clk))
+		return PTR_ERR(info->clk);
+
+	ret = clk_prepare_enable(info->clk);
+	if (ret < 0)
+		return ret;
+
 	/* set context info. */
 	info->pdev = pdev;
 	spin_lock_init(&info->tegra_rtc_lock);
@@ -346,7 +356,7 @@
 		ret = PTR_ERR(info->rtc_dev);
 		dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
 			ret);
-		return ret;
+		goto disable_clk;
 	}
 
 	ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
@@ -356,12 +366,25 @@
 		dev_err(&pdev->dev,
 			"Unable to request interrupt for device (err=%d).\n",
 			ret);
-		return ret;
+		goto disable_clk;
 	}
 
 	dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
 
 	return 0;
+
+disable_clk:
+	clk_disable_unprepare(info->clk);
+	return ret;
+}
+
+static int tegra_rtc_remove(struct platform_device *pdev)
+{
+	struct tegra_rtc_info *info = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(info->clk);
+
+	return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -413,6 +436,7 @@
 
 MODULE_ALIAS("platform:tegra_rtc");
 static struct platform_driver tegra_rtc_driver = {
+	.remove		= tegra_rtc_remove,
 	.shutdown	= tegra_rtc_shutdown,
 	.driver		= {
 		.name	= "tegra_rtc",
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4f361d8..734e592 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -968,8 +968,13 @@
 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
 {
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 
-	return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+	if (IS_P3P_TYPE(ha))
+		return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+	else
+		return ((RD_REG_DWORD(&reg->host_status)) ==
+			ISP_REG_DISCONNECT);
 }
 
 /**************************************************************************
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b7889c7..c2ac982 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1982,6 +1982,22 @@
 
 #define READ_CAPACITY_RETRIES_ON_RESET	10
 
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+	u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+	if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+		return false;
+
+	return true;
+}
+
 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 						unsigned char *buffer)
 {
@@ -2047,7 +2063,7 @@
 		return -ENODEV;
 	}
 
-	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+	if (!sd_addressable_capacity(lba, sector_size)) {
 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
 			"kernel compiled with support for large block "
 			"devices.\n");
@@ -2133,7 +2149,7 @@
 		return sector_size;
 	}
 
-	if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+	if (!sd_addressable_capacity(lba, sector_size)) {
 		sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
 			"kernel compiled with support for large block "
 			"devices.\n");
@@ -2780,7 +2796,8 @@
 		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
 	} else
-		rw_max = BLK_DEF_MAX_SECTORS;
+		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+				      (sector_t)BLK_DEF_MAX_SECTORS);
 
 	/* Combine with controller limits */
 	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
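
sd_addressable_capacity() above exists because sector_t counts 512-byte units: a device reporting last LBA N with logical blocks of size S occupies (N + 1) * S / 512 such units, and without CONFIG_LBDAF on 32-bit kernels sector_t is only 32 bits wide. A small standalone model of the check, using userspace stand-ins for the kernel types:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	static unsigned int ilog2_u32(unsigned int v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	/* sizeof_sector_t stands in for sizeof(sector_t): 4 without
	 * CONFIG_LBDAF on 32-bit, 8 otherwise. */
	static bool addressable(uint64_t lba, unsigned int block_size,
				size_t sizeof_sector_t)
	{
		uint64_t last_sector = (lba + 1ULL) << (ilog2_u32(block_size) - 9);

		return !(sizeof_sector_t == 4 && last_sector > UINT32_MAX);
	}

	int main(void)
	{
		/* ~16 TiB of 4 KiB blocks: fine with a 64-bit sector_t,
		 * rejected with a 32-bit one. */
		printf("%d %d\n",
		       addressable(0xffffffffULL, 4096, 8),
		       addressable(0xffffffffULL, 4096, 4));
		return 0;
	}
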
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index bed2bbd..e635973 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -833,6 +833,7 @@
 	unsigned char *buffer;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
+	unsigned int ms_len = 128;
 	int rc, n;
 
 	static const char *loadmech[] =
@@ -859,10 +860,11 @@
 	scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
 
 	/* ask for mode page 0x2a */
-	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+	rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
 			     SR_TIMEOUT, 3, &data, NULL);
 
-	if (!scsi_status_is_good(rc)) {
+	if (!scsi_status_is_good(rc) || data.length > ms_len ||
+	    data.header_length + data.block_descriptor_length > data.length) {
 		/* failed, drive doesn't have capabilities mode page */
 		cd->cdi.speed = 1;
 		cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 72fc3a4..91d6349 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -311,7 +311,12 @@
 		if (list_empty(&cur_bcm_clist[i]))
 			continue;
 		list_for_each_entry(cur_bcm, &cur_bcm_clist[i], link) {
-			if (cur_bcm->updated) {
+			if (cur_bcm->updated ||
+				(cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+				cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
+				cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
+				cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
+				init_time == true)) {
 				if (last_tcs != -1 &&
 					list_is_last(&cur_bcm->link,
 						&cur_bcm_clist[i])) {
@@ -356,18 +361,19 @@
 				if (last_tcs != -1 &&
 					list_is_last(&cur_bcm->link,
 					&cur_bcm_clist[i])) {
-					cmdlist_wake[k].data |=
+					cmdlist_wake[last_tcs].data |=
 						BCM_TCS_CMD_COMMIT_MASK;
-					cmdlist_sleep[k].data |=
+					cmdlist_sleep[last_tcs].data |=
 						BCM_TCS_CMD_COMMIT_MASK;
-					cmdlist_wake[k].complete = true;
-					cmdlist_sleep[k].complete = true;
+					cmdlist_wake[last_tcs].complete = true;
+					cmdlist_sleep[last_tcs].complete = true;
 					idx++;
 				}
 				continue;
 			}
 			last_tcs = k;
 			n_sleep[idx]++;
+			n_wake[idx]++;
 			if (list_is_last(&cur_bcm->link,
 						&cur_bcm_clist[i])) {
 				commit = true;
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 272e70a..6ff39de 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -738,10 +738,21 @@
 	desc.args[0] = proc = d->pas_id;
 	desc.arginfo = SCM_ARGS(1);
 
+	if (d->bus_client) {
+		rc = msm_bus_scale_client_update_request(d->bus_client, 1);
+		if (rc) {
+			dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+									rc);
+			return rc;
+		}
+	} else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+
 	rc = enable_regulators(d, pil->dev, d->proxy_regs,
 					d->proxy_reg_count, true);
 	if (rc)
-		return rc;
+		goto err_regulators;
 
 	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
 						d->proxy_clk_count);
@@ -759,6 +770,11 @@
 
 	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
 	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+	if (d->bus_client)
+		msm_bus_scale_client_update_request(d->bus_client, 0);
+	else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
 
 	if (rc)
 		return rc;
@@ -767,8 +783,15 @@
 	disable_regulators(d, d->regs, d->reg_count, false);
 
 	return scm_ret;
+
 err_clks:
 	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+err_regulators:
+	if (d->bus_client)
+		msm_bus_scale_client_update_request(d->bus_client, 0);
+	else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
 	return rc;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 0efa80b..4a073339a 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -782,22 +782,6 @@
 		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
 			SET_PSTATE_REPLY_OPTIONAL(param);
 		/*
-		 * The GlobalSAN iSCSI Initiator for MacOSX does
-		 * not respond to MaxBurstLength, FirstBurstLength,
-		 * DefaultTime2Wait or DefaultTime2Retain parameter keys.
-		 * So, we set them to 'reply optional' here, and assume the
-		 * the defaults from iscsi_parameters.h if the initiator
-		 * is not RFC compliant and the keys are not negotiated.
-		 */
-		if (!strcmp(param->name, MAXBURSTLENGTH))
-			SET_PSTATE_REPLY_OPTIONAL(param);
-		if (!strcmp(param->name, FIRSTBURSTLENGTH))
-			SET_PSTATE_REPLY_OPTIONAL(param);
-		if (!strcmp(param->name, DEFAULTTIME2WAIT))
-			SET_PSTATE_REPLY_OPTIONAL(param);
-		if (!strcmp(param->name, DEFAULTTIME2RETAIN))
-			SET_PSTATE_REPLY_OPTIONAL(param);
-		/*
 		 * Required for gPXE iSCSI boot client
 		 */
 		if (!strcmp(param->name, MAXCONNECTIONS))
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1f38177..da5a5fc 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -735,21 +735,23 @@
 {
 	struct se_cmd *se_cmd = NULL;
 	int rc;
+	bool op_scsi = false;
 	/*
 	 * Determine if a struct se_cmd is associated with
 	 * this struct iscsi_cmd.
 	 */
 	switch (cmd->iscsi_opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		se_cmd = &cmd->se_cmd;
-		__iscsit_free_cmd(cmd, true, shutdown);
+		op_scsi = true;
 		/*
 		 * Fallthrough
 		 */
 	case ISCSI_OP_SCSI_TMFUNC:
-		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
-		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
-			__iscsit_free_cmd(cmd, true, shutdown);
+		se_cmd = &cmd->se_cmd;
+		__iscsit_free_cmd(cmd, op_scsi, shutdown);
+		rc = transport_generic_free_cmd(se_cmd, shutdown);
+		if (!rc && shutdown && se_cmd->se_sess) {
+			__iscsit_free_cmd(cmd, op_scsi, shutdown);
 			target_put_sess_cmd(se_cmd);
 		}
 		break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 31a096a..6e456de 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -92,6 +92,11 @@
 		pr_err("Source se_lun->lun_se_dev does not exist\n");
 		return -EINVAL;
 	}
+	if (lun->lun_shutdown) {
+		pr_err("Unable to create mappedlun symlink because"
+			" lun->lun_shutdown=true\n");
+		return -EINVAL;
+	}
 	se_tpg = lun->lun_tpg;
 
 	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2744251..1949f50 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -640,6 +640,8 @@
 	 */
 	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
+	lun->lun_shutdown = true;
+
 	core_clear_lun_from_tpg(lun, tpg);
 	/*
 	 * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -661,6 +663,8 @@
 	}
 	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		hlist_del_rcu(&lun->link);
+
+	lun->lun_shutdown = false;
 	mutex_unlock(&tpg->tpg_lun_mutex);
 
 	percpu_ref_exit(&lun->lun_ref);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 70c143a..1a83456 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -306,24 +306,50 @@
 		   DATA_BLOCK_BITS);
 }
 
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
-		struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+			     bool bidi)
 {
+	struct se_cmd *se_cmd = cmd->se_cmd;
 	int i, block;
 	int block_remaining = 0;
 	void *from, *to;
 	size_t copy_bytes, from_offset;
-	struct scatterlist *sg;
+	struct scatterlist *sg, *data_sg;
+	unsigned int data_nents;
+	DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+	bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+	if (!bidi) {
+		data_sg = se_cmd->t_data_sg;
+		data_nents = se_cmd->t_data_nents;
+	} else {
+		uint32_t count;
+
+		/*
+		 * For bidi case, the first count blocks are for Data-Out
+		 * buffer blocks, and before gathering the Data-In buffer
+		 * the Data-Out buffer blocks should be discarded.
+		 */
+		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+		while (count--) {
+			block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+			clear_bit(block, bitmap);
+		}
+
+		data_sg = se_cmd->t_bidi_data_sg;
+		data_nents = se_cmd->t_bidi_data_nents;
+	}
 
 	for_each_sg(data_sg, sg, data_nents, i) {
 		int sg_remaining = sg->length;
 		to = kmap_atomic(sg_page(sg)) + sg->offset;
 		while (sg_remaining > 0) {
 			if (block_remaining == 0) {
-				block = find_first_bit(cmd_bitmap,
+				block = find_first_bit(bitmap,
 						DATA_BLOCK_BITS);
 				block_remaining = DATA_BLOCK_SIZE;
-				clear_bit(block, cmd_bitmap);
+				clear_bit(block, bitmap);
 			}
 			copy_bytes = min_t(size_t, sg_remaining,
 					block_remaining);
@@ -389,6 +415,27 @@
 	return true;
 }
 
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+		data_length += round_up(se_cmd->t_bidi_data_sg->length,
+				DATA_BLOCK_SIZE);
+	}
+
+	return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+	return data_length / DATA_BLOCK_SIZE;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -402,7 +449,7 @@
 	uint32_t cmd_head;
 	uint64_t cdb_off;
 	bool copy_to_data_area;
-	size_t data_length;
+	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
 	DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -416,8 +463,7 @@
 	 * expensive to tell how many regions are freed in the bitmap
 	*/
 	base_command_size = max(offsetof(struct tcmu_cmd_entry,
-				req.iov[se_cmd->t_bidi_data_nents +
-					se_cmd->t_data_nents]),
+				req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
 				sizeof(struct tcmu_cmd_entry));
 	command_size = base_command_size
 		+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -428,11 +474,6 @@
 
 	mb = udev->mb_addr;
 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-	data_length = se_cmd->data_length;
-	if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-		data_length += se_cmd->t_bidi_data_sg->length;
-	}
 	if ((command_size > (udev->cmdr_size / 2)) ||
 	    data_length > udev->data_size) {
 		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -502,11 +543,14 @@
 	entry->req.iov_dif_cnt = 0;
 
 	/* Handle BIDI commands */
-	iov_cnt = 0;
-	alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-		se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-	entry->req.iov_bidi_cnt = iov_cnt;
-
+	if (se_cmd->se_cmd_flags & SCF_BIDI) {
+		iov_cnt = 0;
+		iov++;
+		alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+				se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+				false);
+		entry->req.iov_bidi_cnt = iov_cnt;
+	}
 	/* cmd's data_bitmap is what changed in process */
 	bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
 			DATA_BLOCK_BITS);
@@ -582,19 +626,11 @@
 			       se_cmd->scsi_sense_length);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
 		/* Get Data-In buffer before clean up */
-		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-		gather_data_area(udev, bitmap,
-			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+		gather_data_area(udev, cmd, true);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-		DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
-		bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-		gather_data_area(udev, bitmap,
-			se_cmd->t_data_sg, se_cmd->t_data_nents);
+		gather_data_area(udev, cmd, false);
 		free_data_area(udev, cmd);
 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
 		free_data_area(udev, cmd);
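
The bidi branch of gather_data_area() above works because a command's data blocks are recorded in one bitmap with the Data-Out blocks allocated first; gathering the Data-In payload therefore starts by popping and discarding ceil(data_length / DATA_BLOCK_SIZE) bits. A toy model of that skip-then-gather walk, with a hypothetical block size and bitmap:

	#include <stdio.h>

	#define BLOCK_SIZE	4096u
	#define NBITS		32

	static int find_first_set(unsigned int bits)
	{
		int i;

		for (i = 0; i < NBITS; i++)
			if (bits & (1u << i))
				return i;
		return -1;
	}

	int main(void)
	{
		/* Blocks 2,3,5,7 belong to the command; 2 and 3 hold Data-Out. */
		unsigned int bitmap = (1u << 2) | (1u << 3) | (1u << 5) | (1u << 7);
		unsigned int data_out_len = 2 * BLOCK_SIZE;
		unsigned int skip = (data_out_len + BLOCK_SIZE - 1) / BLOCK_SIZE;
		int block;

		while (skip--) {
			block = find_first_set(bitmap);
			if (block < 0)
				break;
			bitmap &= ~(1u << block);	/* drop a Data-Out block */
		}
		while ((block = find_first_set(bitmap)) >= 0) {
			printf("gather Data-In from block %d\n", block);
			bitmap &= ~(1u << block);
		}
		return 0;
	}
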
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 2faed7f..acbd26b 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -52,7 +52,7 @@
 obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
 obj-$(CONFIG_INTEL_PCH_THERMAL)	+= intel_pch_thermal.o
 obj-$(CONFIG_ST_THERMAL)	+= st/
-obj-$(CONFIG_QCOM_TSENS)	+= qcom/
+obj-$(CONFIG_ARCH_QCOM)		+= qcom/
 obj-$(CONFIG_TEGRA_SOCTHERM)	+= tegra/
 obj-$(CONFIG_HISI_THERMAL)     += hisi_thermal.o
 obj-$(CONFIG_MTK_THERMAL)	+= mtk_thermal.o
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index a6245d5..37125c0 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -548,11 +548,29 @@
 	if (cpufreq_device->cpufreq_floor_state == state)
 		return 0;
 
-	floor_freq = cpufreq_device->freq_table[state];
 	cpufreq_device->cpufreq_floor_state = state;
-	cpufreq_device->floor_freq = floor_freq;
 
-	cpufreq_update_policy(cpu);
+	/*
+	 * Check if the device has a platform mitigation function that
+	 * can handle the CPU freq mitigation, if not, notify cpufreq
+	 * framework.
+	 */
+	if (cpufreq_device->plat_ops &&
+		cpufreq_device->plat_ops->floor_limit) {
+		/*
+		 * Last level is core isolation so use the frequency
+		 * of previous state.
+		 */
+		if (state == cpufreq_device->max_level)
+			state--;
+		floor_freq = cpufreq_device->freq_table[state];
+		cpufreq_device->floor_freq = floor_freq;
+		cpufreq_device->plat_ops->floor_limit(cpu, floor_freq);
+	} else {
+		floor_freq = cpufreq_device->freq_table[state];
+		cpufreq_device->floor_freq = floor_freq;
+		cpufreq_update_policy(cpu);
+	}
 
 	return 0;
 }
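
Two details in the cpu_cooling change above are easy to miss: the floor request is handed to a platform floor_limit() hook when one is registered (bypassing cpufreq_update_policy()), and the deepest cooling state means core isolation, so its floor frequency is borrowed from the previous state. The second point reduces to the following sketch, with a hypothetical frequency table:

	#include <stdio.h>

	/* freq_table has one entry per non-isolation state, highest first;
	 * max_level is the isolation state and has no frequency of its own. */
	static unsigned int floor_freq_for_state(const unsigned int *freq_table,
						 unsigned int max_level,
						 unsigned int state)
	{
		if (state == max_level)
			state--;		/* fall back to the last real OPP */
		return freq_table[state];
	}

	int main(void)
	{
		const unsigned int table[] = { 1800000, 1400000, 1000000, 600000 };
		const unsigned int max_level = 4;	/* state 4 == isolate core */

		printf("%u\n", floor_freq_for_state(table, max_level, 4));	/* 600000 */
		printf("%u\n", floor_freq_for_state(table, max_level, 1));	/* 1400000 */
		return 0;
	}
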
diff --git a/drivers/thermal/qcom/bcl_peripheral.c b/drivers/thermal/qcom/bcl_peripheral.c
index 55ff770..75e553f 100644
--- a/drivers/thermal/qcom/bcl_peripheral.c
+++ b/drivers/thermal/qcom/bcl_peripheral.c
@@ -259,7 +259,7 @@
 		 */
 		for (vbat_idx = 2; vbat_idx < BCL_STD_VBAT_NR;
 			vbat_idx++) {
-			if (vbat_uv > vbat_low[vbat_idx])
+			if (vbat_uv >= vbat_low[vbat_idx])
 				continue;
 			break;
 		}
@@ -274,7 +274,7 @@
 		 */
 		for (vbat_idx = 1; vbat_idx < (BCL_STD_VBAT_NR - 1);
 			vbat_idx++) {
-			if (vbat_uv > vbat_low[vbat_idx])
+			if (vbat_uv >= vbat_low[vbat_idx])
 				continue;
 			break;
 		}
diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c
index 7b6952f..74f5ce0 100644
--- a/drivers/thermal/qcom/msm_lmh_dcvs.c
+++ b/drivers/thermal/qcom/msm_lmh_dcvs.c
@@ -396,8 +396,6 @@
 			continue;
 		lmh_node = of_parse_phandle(cpu_node, "qcom,lmh-dcvs", 0);
 		if (lmh_node == dn) {
-			affinity = MPIDR_AFFINITY_LEVEL(
-					cpu_logical_map(cpu), 1);
 			/*set the cpumask*/
 			cpumask_set_cpu(cpu, &(mask));
 		}
@@ -409,7 +407,7 @@
 	 * We return error if none of the CPUs have
 	 * reference to our LMH node
 	 */
-	if (affinity == -1)
+	if (cpumask_empty(&mask))
 		return -EINVAL;
 
 	ret = limits_dcvs_get_freq_limits(cpumask_first(&mask), &max_freq,
@@ -426,6 +424,9 @@
 		return -ENOMEM;
 
 	cpumask_copy(&hw->core_map, &mask);
+	ret = of_property_read_u32(dn, "qcom,affinity", &affinity);
+	if (ret)
+		return -ENODEV;
 	switch (affinity) {
 	case 0:
 		hw->affinity = LIMITS_CLUSTER_0;
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index b3cb5c0..df9be34 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -27,6 +27,8 @@
 #include <linux/serial_core.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
 
 /* UART specific GENI registers */
 #define SE_UART_LOOPBACK_CFG		(0x22C)
@@ -95,6 +97,10 @@
 #define GENI_UART_NR_PORTS	(15)
 #define DEF_FIFO_DEPTH_WORDS	(16)
 #define DEF_FIFO_WIDTH_BITS	(32)
+#define UART_CORE2X_VOTE	(10000)
+#define DEFAULT_SE_CLK		(19200000)
+#define DEFAULT_BUS_WIDTH	(4)
+
 
 struct msm_geni_serial_port {
 	struct uart_port uport;
@@ -1257,6 +1263,25 @@
 	}
 
 	uport->dev = &pdev->dev;
+
+	if (!(of_property_read_u32(pdev->dev.of_node, "qcom,bus-mas",
+					&dev_port->serial_rsc.bus_mas))) {
+		dev_port->serial_rsc.bus_bw =
+				msm_bus_scale_register(
+					dev_port->serial_rsc.bus_mas,
+					MSM_BUS_SLAVE_EBI_CH0,
+					(char *)dev_name(&pdev->dev),
+					false);
+		if (IS_ERR_OR_NULL(dev_port->serial_rsc.bus_bw)) {
+			ret = PTR_ERR(dev_port->serial_rsc.bus_bw);
+			goto exit_geni_serial_probe;
+		}
+		dev_port->serial_rsc.ab = UART_CORE2X_VOTE;
+		dev_port->serial_rsc.ib = DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH;
+	} else {
+		dev_info(&pdev->dev, "No bus master specified");
+	}
+
 	dev_port->serial_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
 	if (IS_ERR(dev_port->serial_rsc.se_clk)) {
 		ret = PTR_ERR(dev_port->serial_rsc.se_clk);
@@ -1364,6 +1389,7 @@
 			(struct uart_driver *)port->uport.private_data;
 
 	uart_remove_one_port(drv, &port->uport);
+	msm_bus_scale_unregister(port->serial_rsc.bus_bw);
 	return 0;
 }
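
The probe path above registers a bus-bandwidth client only when the optional "qcom,bus-mas" property is present, and seeds the vote with a fixed ab and an ib derived from the default serial-engine clock and bus width. A quick standalone check of that arithmetic, reusing the constants defined in the hunk (the program itself is a toy, not the msm-bus API):

#include <stdio.h>

#define UART_CORE2X_VOTE	10000UL		/* ab: average bandwidth vote */
#define DEFAULT_SE_CLK		19200000UL	/* 19.2 MHz serial engine clock */
#define DEFAULT_BUS_WIDTH	4UL

int main(void)
{
	unsigned long ab = UART_CORE2X_VOTE;
	unsigned long ib = DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH;

	/* ib works out to 76,800,000 for the defaults above */
	printf("ab=%lu ib=%lu\n", ab, ib);
	return 0;
}
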
 
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index c6aa884..aaa0fc2 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -587,11 +587,17 @@
 	struct mtp_dev *dev = fp->private_data;
 	struct usb_composite_dev *cdev = dev->cdev;
 	struct usb_request *req;
-	ssize_t r = count, xfer, len;
+	ssize_t r = count;
+	unsigned xfer;
 	int ret = 0;
+	size_t len;
 
 	DBG(cdev, "mtp_read(%zu) state:%d\n", count, dev->state);
 
+	len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
+	if (len > MTP_BULK_BUFFER_SIZE)
+		return -EINVAL;
+
 	/* we will block until we're online */
 	DBG(cdev, "mtp_read: waiting for online state\n");
 	ret = wait_event_interruptible(dev->read_wq,
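
mtp_read() now sizes the OUT request by rounding the user count up to the endpoint's max packet size via usb_ep_align_maybe() and rejects anything that would overflow the bulk buffer. A standalone sketch of that bound check; round_up_to_packet() and MAX_PACKET stand in for the gadget helper and the real wMaxPacketSize:

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

#define MTP_BULK_BUFFER_SIZE	16384
#define MAX_PACKET		512	/* assumed bulk wMaxPacketSize */

/* stand-in for usb_ep_align_maybe(): round count up to a packet multiple */
static size_t round_up_to_packet(size_t count)
{
	return (count + MAX_PACKET - 1) / MAX_PACKET * MAX_PACKET;
}

static int check_read_len(size_t count)
{
	size_t len = round_up_to_packet(count);

	if (len > MTP_BULK_BUFFER_SIZE)
		return -EINVAL;	/* request would overrun the bulk buffer */
	printf("count=%zu -> request len=%zu\n", count, len);
	return 0;
}

int main(void)
{
	check_read_len(100);		/* rounds up to 512 */
	check_read_len(16384);		/* exactly fits */
	if (check_read_len(16385) == -EINVAL)	/* rounds to 16896, rejected */
		printf("oversized read rejected\n");
	return 0;
}
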
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 37a37c4..6f2e729 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
+#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <video/vga.h>
@@ -118,6 +119,8 @@
 	return false;
 }
 
+static bool pci_dev_disabled;	/* FB base matches BAR of a disabled device */
+
 static int efifb_probe(struct platform_device *dev)
 {
 	struct fb_info *info;
@@ -127,7 +130,7 @@
 	unsigned int size_total;
 	char *option = NULL;
 
-	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
 		return -ENODEV;
 
 	if (fb_get_options("efifb", &option))
@@ -327,3 +330,64 @@
 };
 
 builtin_platform_driver(efifb_driver);
+
+#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
+
+static bool pci_bar_found;	/* did we find a BAR matching the efifb base? */
+
+static void claim_efifb_bar(struct pci_dev *dev, int idx)
+{
+	u16 word;
+
+	pci_bar_found = true;
+
+	pci_read_config_word(dev, PCI_COMMAND, &word);
+	if (!(word & PCI_COMMAND_MEMORY)) {
+		pci_dev_disabled = true;
+		dev_err(&dev->dev,
+			"BAR %d: assigned to efifb but device is disabled!\n",
+			idx);
+		return;
+	}
+
+	if (pci_claim_resource(dev, idx)) {
+		pci_dev_disabled = true;
+		dev_err(&dev->dev,
+			"BAR %d: failed to claim resource for efifb!\n", idx);
+		return;
+	}
+
+	dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
+}
+
+static void efifb_fixup_resources(struct pci_dev *dev)
+{
+	u64 base = screen_info.lfb_base;
+	u64 size = screen_info.lfb_size;
+	int i;
+
+	if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+		return;
+
+	if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+		base |= (u64)screen_info.ext_lfb_base << 32;
+
+	if (!base)
+		return;
+
+	for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+		struct resource *res = &dev->resource[i];
+
+		if (!(res->flags & IORESOURCE_MEM))
+			continue;
+
+		if (res->start <= base && res->end >= base + size - 1) {
+			claim_efifb_bar(dev, i);
+			break;
+		}
+	}
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
+			       16, efifb_fixup_resources);
+
+#endif
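
efifb_fixup_resources() only claims a BAR whose memory window fully covers the firmware framebuffer. A standalone sketch of the containment test used in the hunk (the resource layout is simplified to start/end pairs):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct res {
	uint64_t start;
	uint64_t end;	/* inclusive, as in struct resource */
};

/* true if [base, base + size - 1] lies entirely inside the resource */
static bool bar_covers_fb(const struct res *r, uint64_t base, uint64_t size)
{
	return r->start <= base && r->end >= base + size - 1;
}

int main(void)
{
	struct res bar = { .start = 0x80000000, .end = 0x8fffffff };

	printf("%d\n", bar_covers_fb(&bar, 0x80100000, 8 * 1024 * 1024)); /* 1 */
	printf("%d\n", bar_covers_fb(&bar, 0x8ff00000, 8 * 1024 * 1024)); /* 0: spills past end */
	return 0;
}
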
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d51..ea2f19f 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -644,7 +644,6 @@
 		break;
 
 	case XenbusStateInitWait:
-InitWait:
 		xenbus_switch_state(dev, XenbusStateConnected);
 		break;
 
@@ -655,7 +654,8 @@
 		 * get Connected twice here.
 		 */
 		if (dev->state != XenbusStateConnected)
-			goto InitWait; /* no InitWait seen yet, fudge it */
+			/* no InitWait seen yet, fudge it */
+			xenbus_switch_state(dev, XenbusStateConnected);
 
 		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
 				 "request-update", "%d", &val) < 0)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1cd0e2e..3925758 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2597,7 +2597,7 @@
 		wdata->credits = credits;
 
 		if (!wdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(wdata->cfile, false))
+		    !(rc = cifs_reopen_file(wdata->cfile, false)))
 			rc = server->ops->async_writev(wdata,
 					cifs_uncached_writedata_release);
 		if (rc) {
@@ -3002,7 +3002,7 @@
 		rdata->credits = credits;
 
 		if (!rdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(rdata->cfile, true))
+		    !(rc = cifs_reopen_file(rdata->cfile, true)))
 			rc = server->ops->async_readv(rdata);
 error:
 		if (rc) {
@@ -3577,7 +3577,7 @@
 		}
 
 		if (!rdata->cfile->invalidHandle ||
-		    !cifs_reopen_file(rdata->cfile, true))
+		    !(rc = cifs_reopen_file(rdata->cfile, true)))
 			rc = server->ops->async_readv(rdata);
 		if (rc) {
 			add_credits_and_wake_if(server, rdata->credits, 0);
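
The three cifs hunks change !cifs_reopen_file(...) to !(rc = cifs_reopen_file(...)) so a reopen failure is recorded in rc instead of leaving it zero while the async I/O is silently skipped. A tiny standalone illustration of why the assignment inside the short-circuit matters (reopen() and send_io() are placeholders):

#include <stdio.h>

static int reopen_fails = 1;

static int reopen(void)  { return reopen_fails ? -11 /* -EAGAIN */ : 0; }
static int send_io(void) { return 0; }

int main(void)
{
	int invalid_handle = 1;
	int rc = 0;

	/*
	 * Without the assignment, a reopen failure leaves rc == 0 and the
	 * error-handling branch below never runs.  Capturing rc inside the
	 * short-circuit keeps the "skip send_io on failure" behaviour while
	 * still reporting the failure.
	 */
	if (!invalid_handle || !(rc = reopen()))
		rc = send_io();

	if (rc)
		printf("reopen/send failed: %d\n", rc);
	else
		printf("I/O submitted\n");
	return 0;
}
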
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index bdd3292..7080dac 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1987,6 +1987,9 @@
 	struct cifs_tcon *tcon, *tcon2;
 	struct list_head tmp_list;
 	int tcon_exist = false;
+	int rc;
+	int resched = false;
+
 
 	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
 	mutex_lock(&server->reconnect_mutex);
@@ -2014,13 +2017,18 @@
 	spin_unlock(&cifs_tcp_ses_lock);
 
 	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
-		if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
+		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+		if (!rc)
 			cifs_reopen_persistent_handles(tcon);
+		else
+			resched = true;
 		list_del_init(&tcon->rlist);
 		cifs_put_tcon(tcon);
 	}
 
 	cifs_dbg(FYI, "Reconnecting tcons finished\n");
+	if (resched)
+		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
 	mutex_unlock(&server->reconnect_mutex);
 
 	/* now we can safely release srv struct */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index a826864..3cb7fa2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -72,10 +72,9 @@
 			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
 					   csum_size);
 			offset += csum_size;
-			csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
-					   EXT4_INODE_SIZE(inode->i_sb) -
-					   offset);
 		}
+		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+				   EXT4_INODE_SIZE(inode->i_sb) - offset);
 	}
 
 	return csum;
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index f419dd9..fe2cbeb 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -208,14 +208,19 @@
 				continue;
 			/*
 			 * Skip ops whose filesystem we don't know about unless
-			 * it is being mounted.
+			 * it is being mounted or unmounted.  It is possible for
+			 * a filesystem we don't know about to be unmounted if
+			 * it fails to mount in the kernel after userspace has
+			 * been sent the mount request.
 			 */
 			/* XXX: is there a better way to detect this? */
 			} else if (ret == -1 &&
 				   !(op->upcall.type ==
 					ORANGEFS_VFS_OP_FS_MOUNT ||
 				     op->upcall.type ==
-					ORANGEFS_VFS_OP_GETATTR)) {
+					ORANGEFS_VFS_OP_GETATTR ||
+				     op->upcall.type ==
+					ORANGEFS_VFS_OP_FS_UMOUNT)) {
 				gossip_debug(GOSSIP_DEV_DEBUG,
 				    "orangefs: skipping op tag %llu %s\n",
 				    llu(op->tag), get_opname_string(op));
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 3bf803d..45dd8f2 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -249,6 +249,7 @@
 	char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
 	struct super_block *sb;
 	int mount_pending;
+	int no_list;
 	struct list_head list;
 };
 
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index cd261c8..629d8c9 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -493,7 +493,7 @@
 
 	if (ret) {
 		d = ERR_PTR(ret);
-		goto free_op;
+		goto free_sb_and_op;
 	}
 
 	/*
@@ -519,6 +519,9 @@
 	spin_unlock(&orangefs_superblocks_lock);
 	op_release(new_op);
 
+	/* Must be removed from the list now. */
+	ORANGEFS_SB(sb)->no_list = 0;
+
 	if (orangefs_userspace_version >= 20906) {
 		new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
 		if (!new_op)
@@ -533,6 +536,10 @@
 
 	return dget(sb->s_root);
 
+free_sb_and_op:
+	/* Will call orangefs_kill_sb with sb not in list. */
+	ORANGEFS_SB(sb)->no_list = 1;
+	deactivate_locked_super(sb);
 free_op:
 	gossip_err("orangefs_mount: mount request failed with %d\n", ret);
 	if (ret == -EINVAL) {
@@ -558,12 +565,14 @@
 	 */
 	 orangefs_unmount_sb(sb);
 
-	/* remove the sb from our list of orangefs specific sb's */
-
-	spin_lock(&orangefs_superblocks_lock);
-	__list_del_entry(&ORANGEFS_SB(sb)->list);	/* not list_del_init */
-	ORANGEFS_SB(sb)->list.prev = NULL;
-	spin_unlock(&orangefs_superblocks_lock);
+	if (!ORANGEFS_SB(sb)->no_list) {
+		/* remove the sb from our list of orangefs specific sb's */
+		spin_lock(&orangefs_superblocks_lock);
+		/* not list_del_init */
+		__list_del_entry(&ORANGEFS_SB(sb)->list);
+		ORANGEFS_SB(sb)->list.prev = NULL;
+		spin_unlock(&orangefs_superblocks_lock);
+	}
 
 	/*
 	 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 65d28f9..f998332 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -962,7 +962,14 @@
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmdp)
 {
-	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+	pmd_t pmd = *pmdp;
+
+	/* See comment in change_huge_pmd() */
+	pmdp_invalidate(vma, addr, pmdp);
+	if (pmd_dirty(*pmdp))
+		pmd = pmd_mkdirty(pmd);
+	if (pmd_young(*pmdp))
+		pmd = pmd_mkyoung(pmd);
 
 	pmd = pmd_wrprotect(pmd);
 	pmd = pmd_clear_soft_dirty(pmd);
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 14747a8..2964527 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -222,7 +222,7 @@
 		break;
 	case PERM_ANDROID_PACKAGE_CACHE:
 		if (info->d_uid != 0)
-			gid = multiuser_get_cache_gid(info->d_uid);
+			gid = multiuser_get_ext_cache_gid(info->d_uid);
 		else
 			gid = multiuser_get_uid(info->userid, uid);
 		break;
@@ -252,7 +252,7 @@
 				goto retry_deleg;
 		}
 		if (error)
-			pr_err("sdcardfs: Failed to touch up lower fs gid/uid.\n");
+			pr_debug("sdcardfs: Failed to touch up lower fs gid/uid for %s\n", name);
 	}
 	sdcardfs_put_lower_path(dentry, &path);
 }
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index f9c0282..19154b7 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -91,7 +91,9 @@
 	struct sdcardfs_inode_info *info;
 	struct inode_data data;
 	struct inode *inode; /* the new inode to return */
-	int err;
+
+	if (!igrab(lower_inode))
+		return ERR_PTR(-ESTALE);
 
 	data.id = id;
 	data.lower_inode = lower_inode;
@@ -106,22 +108,19 @@
 			     sdcardfs_inode_set, /* inode init function */
 			     &data); /* data passed to test+set fxns */
 	if (!inode) {
-		err = -EACCES;
 		iput(lower_inode);
-		return ERR_PTR(err);
+		return ERR_PTR(-ENOMEM);
 	}
-	/* if found a cached inode, then just return it */
-	if (!(inode->i_state & I_NEW))
+	/* if found a cached inode, then just return it (after iput) */
+	if (!(inode->i_state & I_NEW)) {
+		iput(lower_inode);
 		return inode;
+	}
 
 	/* initialize new inode */
 	info = SDCARDFS_I(inode);
 
 	inode->i_ino = lower_inode->i_ino;
-	if (!igrab(lower_inode)) {
-		err = -ESTALE;
-		return ERR_PTR(err);
-	}
 	sdcardfs_set_lower_inode(inode, lower_inode);
 
 	inode->i_version++;
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
index 2e89b58..d0c925c 100644
--- a/fs/sdcardfs/multiuser.h
+++ b/fs/sdcardfs/multiuser.h
@@ -23,6 +23,8 @@
 #define AID_APP_END          19999 /* last app user */
 #define AID_CACHE_GID_START  20000 /* start of gids for apps to mark cached data */
 #define AID_EXT_GID_START    30000 /* start of gids for apps to mark external data */
+#define AID_EXT_CACHE_GID_START 40000 /* start of gids for apps to mark external cached data */
+#define AID_EXT_CACHE_GID_END 49999   /* end of gids for apps to mark external cached data */
 #define AID_SHARED_GID_START 50000 /* start of gids for apps in each user to share */
 
 typedef uid_t userid_t;
@@ -33,9 +35,9 @@
 	return (user_id * AID_USER_OFFSET) + (app_id % AID_USER_OFFSET);
 }
 
-static inline gid_t multiuser_get_cache_gid(uid_t uid)
+static inline gid_t multiuser_get_ext_cache_gid(uid_t uid)
 {
-	return uid - AID_APP_START + AID_CACHE_GID_START;
+	return uid - AID_APP_START + AID_EXT_CACHE_GID_START;
 }
 
 static inline gid_t multiuser_get_ext_gid(uid_t uid)
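
multiuser_get_ext_cache_gid() maps an app uid into the new 40000-49999 external-cache GID range. A quick standalone check of that arithmetic; AID_APP_START is assumed to be 10000 here, since its definition sits above the portion of the header shown in the hunk:

#include <stdio.h>

#define AID_APP_START			10000	/* assumed; defined above the hunk shown */
#define AID_EXT_CACHE_GID_START		40000
#define AID_EXT_CACHE_GID_END		49999

static unsigned int get_ext_cache_gid(unsigned int uid)
{
	return uid - AID_APP_START + AID_EXT_CACHE_GID_START;
}

int main(void)
{
	unsigned int gid = get_ext_cache_gid(10005);

	/* app uid 10005 -> gid 40005, inside the 40000..49999 range */
	printf("gid=%u in-range=%d\n", gid,
	       gid >= AID_EXT_CACHE_GID_START && gid <= AID_EXT_CACHE_GID_END);
	return 0;
}
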
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 1d4f365..f6d9af3e 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -166,6 +166,16 @@
 	return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
 }
 
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+	return req->base.flags;
+}
+
 static inline struct crypto_ahash *crypto_spawn_ahash(
 	struct crypto_ahash_spawn *spawn)
 {
diff --git a/include/dt-bindings/clock/qcom,aop-qmp.h b/include/dt-bindings/clock/qcom,aop-qmp.h
new file mode 100644
index 0000000..b88dc36
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,aop-qmp.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_AOP_QMP_H
+#define _DT_BINDINGS_CLK_QCOM_AOP_QMP_H
+
+#define QDSS_CLK_LEVEL_OFF		0
+#define QDSS_CLK_LEVEL_DYNAMIC		1
+#define QDSS_CLK_LEVEL_TURBO		2
+#define QDSS_CLK_LEVEL_NOMINAL		3
+#define QDSS_CLK_LEVEL_SVS_L1		4
+#define QDSS_CLK_LEVEL_SVS		5
+#define QDSS_CLK_LEVEL_LOW_SVS		6
+#define QDSS_CLK_LEVEL_MIN_SVS		7
+
+/* clocks id */
+#define QDSS_CLK			0
+
+#endif
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index af84de6..0353461 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -579,6 +579,24 @@
  */
 int subsys_cgroup_allow_attach(struct cgroup_taskset *tset);
 
+static inline void cgroup_init_kthreadd(void)
+{
+	/*
+	 * kthreadd is inherited by all kthreads, keep it in the root so
+	 * that the new kthreads are guaranteed to stay in the root until
+	 * initialization is finished.
+	 */
+	current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+	/*
+	 * This kthread finished initialization.  The creator should have
+	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+	 */
+	current->no_cgroup_migration = 0;
+}
 
 #else /* !CONFIG_CGROUPS */
 
@@ -600,6 +618,8 @@
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
 
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 					       struct cgroup *ancestor)
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 8fd5fba..b1f2d00 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -36,7 +36,11 @@
 #define CLK_IS_CRITICAL		BIT(11) /* do not gate, ever */
 /* parents need enable during gate/ungate, set rate and re-parent */
 #define CLK_OPS_PARENT_ENABLE	BIT(12)
-				/* unused */
+#define CLK_ENABLE_HAND_OFF	BIT(13) /* enable clock when registered. */
+					/*
+					 * hand-off enable_count & prepare_count
+					 * to first consumer that enables clk
+					 */
 #define CLK_IS_MEASURE          BIT(14) /* measure clock */
 
 struct clk;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 373dbd5..ec9c128 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -855,7 +855,9 @@
 		struct vm_area_struct *vma, void *cpu_addr,
 		dma_addr_t dma_addr, size_t size)
 {
-	return -ENODEV;
+	unsigned long attrs = DMA_ATTR_NON_CONSISTENT;
+
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index a0f161e..cb4387d 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -39,6 +39,7 @@
 	struct clk *m_ahb_clk;
 	struct clk *s_ahb_clk;
 	struct msm_bus_client_handle *bus_bw;
+	unsigned int bus_mas;
 	unsigned long ab;
 	unsigned long ib;
 	struct pinctrl *geni_pinctrl;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 867de7d..52524a8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1167,6 +1167,8 @@
 	struct capacity_state *cap_states; /* ptr to capacity state array */
 };
 
+extern bool sched_is_energy_aware(void);
+
 unsigned long capacity_curr_of(int cpu);
 
 struct sched_group;
@@ -1751,6 +1753,10 @@
 #ifdef CONFIG_COMPAT_BRK
 	unsigned brk_randomized:1;
 #endif
+#ifdef CONFIG_CGROUPS
+	/* disallow userland-initiated cgroup migration */
+	unsigned no_cgroup_migration:1;
+#endif
 
 	unsigned long atomic_flags; /* Flags needing atomic access. */
 
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 6e22b54..c146ebc 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -39,7 +39,10 @@
 	};
 	union {
 		unsigned long nr_segs;
-		int idx;
+		struct {
+			int idx;
+			int start_idx;
+		};
 	};
 };
 
@@ -81,6 +84,7 @@
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 6233e8f..0383c60 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -705,6 +705,7 @@
 	u64			unpacked_lun;
 #define SE_LUN_LINK_MAGIC			0xffff7771
 	u32			lun_link_magic;
+	bool			lun_shutdown;
 	bool			lun_access_ro;
 	u32			lun_index;
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index eadd942..9d1ed58 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2941,11 +2941,12 @@
 		tsk = tsk->group_leader;
 
 	/*
-	 * Workqueue threads may acquire PF_NO_SETAFFINITY and become
-	 * trapped in a cpuset, or RT worker may be born in a cgroup
-	 * with no rt_runtime allocated.  Just say no.
+	 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
+	 * If userland migrates such a kthread to a non-root cgroup, it can
+	 * become trapped in a cpuset, or RT kthread may be born in a
+	 * cgroup with no rt_runtime allocated.  Just say no.
 	 */
-	if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
+	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
 		goto out_unlock_rcu;
 	}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index be2cc1f..c2c911a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -18,6 +18,7 @@
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
+#include <linux/cgroup.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -205,6 +206,7 @@
 	ret = -EINTR;
 
 	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
+		cgroup_kthread_ready();
 		__kthread_parkme(&self);
 		ret = threadfn(data);
 	}
@@ -530,6 +532,7 @@
 	set_mems_allowed(node_states[N_MEMORY]);
 
 	current->flags |= PF_NOFREEZE;
+	cgroup_init_kthreadd();
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
index b0656b7..05dd2cb 100644
--- a/kernel/sched/energy.c
+++ b/kernel/sched/energy.c
@@ -56,6 +56,9 @@
 	int sd_level, i, nstates, cpu;
 	const __be32 *val;
 
+	if (!sched_is_energy_aware())
+		return;
+
 	for_each_possible_cpu(cpu) {
 		cn = of_get_cpu_node(cpu, NULL);
 		if (!cn) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2a8643c..6fb615e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5343,6 +5343,15 @@
 	return sched_feat(ENERGY_AWARE);
 }
 
+/*
+ * Externally visible function. Let's keep the one above
+ * so that the check is inlined/optimized in the sched paths.
+ */
+bool sched_is_energy_aware(void)
+{
+	return energy_aware();
+}
+
 struct energy_env {
 	struct sched_group	*sg_top;
 	struct sched_group	*sg_cap;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index a088a55..b1c7852 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -49,6 +49,7 @@
 #include <linux/sched/deadline.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
+#include <linux/delay.h>
 
 #include <asm/uaccess.h>
 
@@ -1638,6 +1639,12 @@
 				raw_spin_unlock(&old_base->lock);
 				raw_spin_unlock(&new_base->lock);
 				cpu_relax();
+				/*
+				 * cpu_relax may just be a barrier. Grant the
+				 * run_hrtimer_list code some time to obtain
+				 * the spinlock.
+				 */
+				udelay(1);
 				raw_spin_lock(&new_base->lock);
 				raw_spin_lock_nested(&old_base->lock,
 							SINGLE_DEPTH_NESTING);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 470d966..5463c3b 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1856,7 +1856,8 @@
 		spin_lock_irqsave(&new_base->lock, flags);
 		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
-		BUG_ON(old_base->running_timer);
+		if (!cpu_online(cpu))
+			BUG_ON(old_base->running_timer);
 
 		for (i = 0; i < WHEEL_SIZE; i++)
 			migrate_timer_list(new_base, old_base->vectors + i,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index da87b3c..221eb59 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3736,23 +3736,24 @@
 	ftrace_probe_registered = 1;
 }
 
-static void __disable_ftrace_function_probe(void)
+static bool __disable_ftrace_function_probe(void)
 {
 	int i;
 
 	if (!ftrace_probe_registered)
-		return;
+		return false;
 
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
 		if (hhd->first)
-			return;
+			return false;
 	}
 
 	/* no more funcs left */
 	ftrace_shutdown(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 0;
+	return true;
 }
 
 
@@ -3882,6 +3883,7 @@
 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				  void *data, int flags)
 {
+	struct ftrace_ops_hash old_hash_ops;
 	struct ftrace_func_entry *rec_entry;
 	struct ftrace_func_probe *entry;
 	struct ftrace_func_probe *p;
@@ -3893,6 +3895,7 @@
 	struct hlist_node *tmp;
 	char str[KSYM_SYMBOL_LEN];
 	int i, ret;
+	bool disabled;
 
 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
 		func_g.search = NULL;
@@ -3911,6 +3914,10 @@
 
 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
+	old_hash_ops.filter_hash = old_hash;
+	/* Probes only have filters */
+	old_hash_ops.notrace_hash = NULL;
+
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
 		/* Hmm, should report this somehow */
@@ -3948,12 +3955,17 @@
 		}
 	}
 	mutex_lock(&ftrace_lock);
-	__disable_ftrace_function_probe();
+	disabled = __disable_ftrace_function_probe();
 	/*
 	 * Remove after the disable is called. Otherwise, if the last
 	 * probe is removed, a null hash means *all enabled*.
 	 */
 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+	/* still need to update the function call sites */
+	if (ftrace_enabled && !disabled)
+		ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+				       &old_hash_ops);
 	synchronize_sched();
 	if (!ret)
 		free_ftrace_hash_rcu(old_hash);
@@ -5389,6 +5401,15 @@
 	trace_free_pid_list(pid_list);
 }
 
+void ftrace_clear_pids(struct trace_array *tr)
+{
+	mutex_lock(&ftrace_lock);
+
+	clear_ftrace_pids(tr);
+
+	mutex_unlock(&ftrace_lock);
+}
+
 static void ftrace_pid_reset(struct trace_array *tr)
 {
 	mutex_lock(&ftrace_lock);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b0c47c2..63c6d28 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7245,6 +7245,7 @@
 
 	tracing_set_nop(tr);
 	event_trace_del_tracer(tr);
+	ftrace_clear_pids(tr);
 	ftrace_destroy_function_files(tr);
 	tracefs_remove_recursive(tr->dir);
 	free_trace_buffers(tr);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 38dbb36..e5d06c9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -871,6 +871,7 @@
 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 void ftrace_init_tracefs_toplevel(struct trace_array *tr,
 				  struct dentry *d_tracer);
+void ftrace_clear_pids(struct trace_array *tr);
 #else
 static inline int ftrace_trace_task(struct trace_array *tr)
 {
@@ -889,6 +890,7 @@
 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
+static inline void ftrace_clear_pids(struct trace_array *tr) { }
 /* ftace_func_t type is not defined, use macro instead of static inline */
 #define ftrace_init_array_ops(tr, func) do { } while (0)
 #endif /* CONFIG_FUNCTION_TRACER */
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index efb0b4d..a75ea63 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -734,6 +734,68 @@
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+	if (!unroll)
+		return;
+	i->count += unroll;
+	if (unlikely(i->type & ITER_PIPE)) {
+		struct pipe_inode_info *pipe = i->pipe;
+		int idx = i->idx;
+		size_t off = i->iov_offset;
+		while (1) {
+			size_t n = off - pipe->bufs[idx].offset;
+			if (unroll < n) {
+				off -= (n - unroll);
+				break;
+			}
+			unroll -= n;
+			if (!unroll && idx == i->start_idx) {
+				off = 0;
+				break;
+			}
+			if (!idx--)
+				idx = pipe->buffers - 1;
+			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+		}
+		i->iov_offset = off;
+		i->idx = idx;
+		pipe_truncate(i);
+		return;
+	}
+	if (unroll <= i->iov_offset) {
+		i->iov_offset -= unroll;
+		return;
+	}
+	unroll -= i->iov_offset;
+	if (i->type & ITER_BVEC) {
+		const struct bio_vec *bvec = i->bvec;
+		while (1) {
+			size_t n = (--bvec)->bv_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->bvec = bvec;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	} else { /* same logics for iovec and kvec */
+		const struct iovec *iov = i->iov;
+		while (1) {
+			size_t n = (--iov)->iov_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->iov = iov;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	}
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
 /*
  * Return the count of just the current iov_iter segment.
  */
@@ -787,6 +849,7 @@
 	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 	i->iov_offset = 0;
 	i->count = count;
+	i->start_idx = i->idx;
 }
 EXPORT_SYMBOL(iov_iter_pipe);
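
iov_iter_revert() walks segments backwards, restoring nr_segs and iov_offset so a caller can undo a partial copy after a fault. Below is a standalone model of the advance/revert bookkeeping over a plain array of segments; it mirrors only the iovec/kvec branch of the hunk and leaves out the pipe handling entirely.

#include <stdio.h>
#include <stddef.h>

struct vec { size_t len; };

struct iter {
	const struct vec *v;	/* current segment */
	size_t nr_segs;		/* segments remaining, including current */
	size_t off;		/* offset into current segment */
	size_t count;		/* bytes remaining */
};

static void advance(struct iter *i, size_t bytes)
{
	i->count -= bytes;
	while (bytes) {
		size_t n = i->v->len - i->off;

		if (bytes < n) {
			i->off += bytes;
			return;
		}
		bytes -= n;
		i->v++;
		i->nr_segs--;
		i->off = 0;
	}
}

static void revert(struct iter *i, size_t unroll)
{
	i->count += unroll;
	if (unroll <= i->off) {
		i->off -= unroll;
		return;
	}
	unroll -= i->off;
	while (1) {
		size_t n = (--i->v)->len;	/* step back one segment */

		i->nr_segs++;
		if (unroll <= n) {
			i->off = n - unroll;
			return;
		}
		unroll -= n;
	}
}

int main(void)
{
	const struct vec segs[3] = { { 10 }, { 20 }, { 30 } };
	struct iter i = { .v = segs, .nr_segs = 3, .off = 0, .count = 60 };

	advance(&i, 25);	/* lands 15 bytes into the second segment */
	revert(&i, 12);		/* back to 3 bytes into the second segment */
	printf("segs left=%zu off=%zu count=%zu\n", i.nr_segs, i.off, i.count);
	return 0;
}
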
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 917555c..d5b2b75 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1380,8 +1380,7 @@
 		deactivate_page(page);
 
 	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
-		orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
-			tlb->fullmm);
+		pmdp_invalidate(vma, addr, pmd);
 		orig_pmd = pmd_mkold(orig_pmd);
 		orig_pmd = pmd_mkclean(orig_pmd);
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e866ddcc..fdc790a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2152,6 +2152,8 @@
 	struct work_struct work;
 };
 
+static struct workqueue_struct *memcg_kmem_cache_create_wq;
+
 static void memcg_kmem_cache_create_func(struct work_struct *w)
 {
 	struct memcg_kmem_cache_create_work *cw =
@@ -2183,7 +2185,7 @@
 	cw->cachep = cachep;
 	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
 
-	schedule_work(&cw->work);
+	queue_work(memcg_kmem_cache_create_wq, &cw->work);
 }
 
 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
@@ -5796,6 +5798,17 @@
 {
 	int cpu, node;
 
+#ifndef CONFIG_SLOB
+	/*
+	 * Kmem cache creation is mostly done with the slab_mutex held,
+	 * so use a special workqueue to avoid stalling all worker
+	 * threads in case lots of cgroups are created simultaneously.
+	 */
+	memcg_kmem_cache_create_wq =
+		alloc_ordered_workqueue("memcg_kmem_cache_create", 0);
+	BUG_ON(!memcg_kmem_cache_create_wq);
+#endif
+
 	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 
 	for_each_possible_cpu(cpu)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b0bc023..1689bb5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -280,7 +280,7 @@
 struct zspage {
 	struct {
 		unsigned int fullness:FULLNESS_BITS;
-		unsigned int class:CLASS_BITS;
+		unsigned int class:CLASS_BITS + 1;
 		unsigned int isolated:ISOLATED_BITS;
 		unsigned int magic:MAGIC_VAL_BITS;
 	};
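
Widening the field to CLASS_BITS + 1 keeps class indexes that need one extra bit from being silently truncated by the bitfield. A minimal standalone demonstration of that truncation; CLASS_BITS is not visible in the hunk, so the value 8 below is only an assumption for illustration:

#include <stdio.h>

#define CLASS_BITS 8	/* assumed value, for illustration only */

struct narrow { unsigned int class_idx : CLASS_BITS; };
struct wide   { unsigned int class_idx : CLASS_BITS + 1; };

int main(void)
{
	struct narrow n = { 0 };
	struct wide w = { 0 };
	unsigned int idx = 256;	/* needs 9 bits */

	n.class_idx = idx;	/* stored modulo 2^8 -> 0 */
	w.class_idx = idx;	/* fits -> 256 */
	printf("narrow=%u wide=%u\n",
	       (unsigned int)n.class_idx, (unsigned int)w.class_idx);
	return 0;
}
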
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b7de71f..963732e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -378,7 +378,7 @@
 			   struct iov_iter *to, int len)
 {
 	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int i, copy = start - offset, start_off = offset, n;
 	struct sk_buff *frag_iter;
 
 	trace_skb_copy_datagram_iovec(skb, len);
@@ -387,11 +387,12 @@
 	if (copy > 0) {
 		if (copy > len)
 			copy = len;
-		if (copy_to_iter(skb->data + offset, copy, to) != copy)
+		n = copy_to_iter(skb->data + offset, copy, to);
+		offset += n;
+		if (n != copy)
 			goto short_copy;
 		if ((len -= copy) == 0)
 			return 0;
-		offset += copy;
 	}
 
 	/* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -405,13 +406,14 @@
 		if ((copy = end - offset) > 0) {
 			if (copy > len)
 				copy = len;
-			if (copy_page_to_iter(skb_frag_page(frag),
+			n = copy_page_to_iter(skb_frag_page(frag),
 					      frag->page_offset + offset -
-					      start, copy, to) != copy)
+					      start, copy, to);
+			offset += n;
+			if (n != copy)
 				goto short_copy;
 			if (!(len -= copy))
 				return 0;
-			offset += copy;
 		}
 		start = end;
 	}
@@ -443,6 +445,7 @@
 	 */
 
 fault:
+	iov_iter_revert(to, offset - start_off);
 	return -EFAULT;
 
 short_copy:
@@ -593,7 +596,7 @@
 				      __wsum *csump)
 {
 	int start = skb_headlen(skb);
-	int i, copy = start - offset;
+	int i, copy = start - offset, start_off = offset;
 	struct sk_buff *frag_iter;
 	int pos = 0;
 	int n;
@@ -603,11 +606,11 @@
 		if (copy > len)
 			copy = len;
 		n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
+		offset += n;
 		if (n != copy)
 			goto fault;
 		if ((len -= copy) == 0)
 			return 0;
-		offset += copy;
 		pos = copy;
 	}
 
@@ -629,12 +632,12 @@
 						  offset - start, copy,
 						  &csum2, to);
 			kunmap(page);
+			offset += n;
 			if (n != copy)
 				goto fault;
 			*csump = csum_block_add(*csump, csum2, pos);
 			if (!(len -= copy))
 				return 0;
-			offset += copy;
 			pos += copy;
 		}
 		start = end;
@@ -667,6 +670,7 @@
 		return 0;
 
 fault:
+	iov_iter_revert(to, offset - start_off);
 	return -EFAULT;
 }
 
@@ -751,6 +755,7 @@
 	}
 	return 0;
 csum_error:
+	iov_iter_revert(&msg->msg_iter, chunk);
 	return -EINVAL;
 fault:
 	return -EFAULT;
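
The datagram hunks advance offset by the number of bytes actually copied (n) rather than the requested copy, so the fault path can rewind the iterator by exactly offset - start_off. A small standalone sketch of that bookkeeping, with a copy helper that may stop short:

#include <stdio.h>
#include <stddef.h>

/* pretend copy that "faults" after at most 'limit' bytes */
static size_t copy_some(size_t want, size_t limit)
{
	return want < limit ? want : limit;
}

int main(void)
{
	size_t start_off = 100, offset = start_off;
	size_t chunks[3] = { 40, 40, 40 };
	size_t budget = 90;	/* total bytes the fake copier will manage */
	int faulted = 0;

	for (int i = 0; i < 3; i++) {
		size_t n = copy_some(chunks[i], budget);

		offset += n;	/* count what was really consumed */
		budget -= n;
		if (n != chunks[i]) {	/* short copy == fault */
			faulted = 1;
			break;
		}
	}

	if (faulted)	/* undo exactly what the iterator consumed */
		printf("revert %zu bytes\n", offset - start_off);
	return 0;
}
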
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 477600f..73527d8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2165,6 +2165,8 @@
 				continue;
 			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
 				continue;
+			if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
+				continue;
 			dst_hold(&rt->dst);
 			read_unlock_bh(&table->tb6_lock);
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6cbe5bd..6734420 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4735,6 +4735,12 @@
 	if (!asoc)
 		return -EINVAL;
 
+	/* If there is a thread waiting on more sndbuf space for
+	 * sending on this asoc, it cannot be peeled.
+	 */
+	if (waitqueue_active(&asoc->wait))
+		return -EBUSY;
+
 	/* An association cannot be branched off from an already peeled-off
 	 * socket, nor is this supported for tcp style sockets.
 	 */
@@ -7427,8 +7433,6 @@
 		 */
 		release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
-		if (sk != asoc->base.sk)
-			goto do_error;
 		lock_sock(sk);
 
 		*timeo_p = current_timeo;
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index fd5d1e0..e18fe9d 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -33,11 +33,9 @@
 	select SND_SOC_INTEL_SST_MATCH if ACPI
 	depends on (X86 || COMPILE_TEST)
 
-# firmware stuff depends DW_DMAC_CORE; since there is no depends-on from
-# the reverse selection, each machine driver needs to select
-# SND_SOC_INTEL_SST_FIRMWARE carefully depending on DW_DMAC_CORE
 config SND_SOC_INTEL_SST_FIRMWARE
 	tristate
+	select DW_DMAC_CORE
 
 config SND_SOC_INTEL_SST_ACPI
 	tristate
@@ -47,16 +45,18 @@
 
 config SND_SOC_INTEL_HASWELL
 	tristate
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_SST_FIRMWARE
 
 config SND_SOC_INTEL_BAYTRAIL
 	tristate
+	select SND_SOC_INTEL_SST
+	select SND_SOC_INTEL_SST_FIRMWARE
 
 config SND_SOC_INTEL_HASWELL_MACH
 	tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
 	depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
-	depends on DW_DMAC_CORE
-	select SND_SOC_INTEL_SST
+	depends on DMADEVICES
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT5640
 	help
@@ -99,9 +99,8 @@
 config SND_SOC_INTEL_BYT_RT5640_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
 	depends on X86_INTEL_LPSS && I2C
-	depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
-	select SND_SOC_INTEL_SST
-	select SND_SOC_INTEL_SST_FIRMWARE
+	depends on DMADEVICES
+	depends on SND_SST_IPC_ACPI = n
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_RT5640
 	help
@@ -112,9 +111,8 @@
 config SND_SOC_INTEL_BYT_MAX98090_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
 	depends on X86_INTEL_LPSS && I2C
-	depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
-	select SND_SOC_INTEL_SST
-	select SND_SOC_INTEL_SST_FIRMWARE
+	depends on DMADEVICES
+	depends on SND_SST_IPC_ACPI = n
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_MAX98090
 	help
@@ -123,9 +121,8 @@
 
 config SND_SOC_INTEL_BDW_RT5677_MACH
 	tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
-	depends on X86_INTEL_LPSS && GPIOLIB && I2C && DW_DMAC
-	depends on DW_DMAC_CORE=y
-	select SND_SOC_INTEL_SST
+	depends on X86_INTEL_LPSS && GPIOLIB && I2C
+	depends on DMADEVICES
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT5677
 	help
@@ -134,10 +131,8 @@
 
 config SND_SOC_INTEL_BROADWELL_MACH
 	tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
-	depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
-		   I2C_DESIGNWARE_PLATFORM
-	depends on DW_DMAC_CORE
-	select SND_SOC_INTEL_SST
+	depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
+	depends on DMADEVICES
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT286
 	help
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 6987949..96f3f85 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -508,6 +508,8 @@
 	.key_code[7] = 0,
 	.linein_th = 5000,
 	.moisture_en = true,
+	.anc_micbias = MIC_BIAS_2,
+	.enable_anc_mic_detect = false,
 };
 
 static struct snd_soc_dapm_route wcd_audio_paths[] = {
@@ -4655,6 +4657,42 @@
 	},
 };
 
+static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
+	{
+		.name = MSM_DAILINK_NAME(ASM Loopback),
+		.stream_name = "MultiMedia6",
+		.cpu_dai_name = "MultiMedia6",
+		.platform_name = "msm-pcm-loopback",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_pmdown_time = 1,
+		.id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+	},
+	{
+		.name = "USB Audio Hostless",
+		.stream_name = "USB Audio Hostless",
+		.cpu_dai_name = "USBAUDIO_HOSTLESS",
+		.platform_name = "msm-pcm-hostless",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+	},
+};
+
 static struct snd_soc_dai_link msm_common_be_dai_links[] = {
 	/* Backend AFE DAI Links */
 	{
@@ -5373,6 +5411,7 @@
 static struct snd_soc_dai_link msm_tavil_snd_card_dai_links[
 			 ARRAY_SIZE(msm_common_dai_links) +
 			 ARRAY_SIZE(msm_tavil_fe_dai_links) +
+			 ARRAY_SIZE(msm_common_misc_fe_dai_links) +
 			 ARRAY_SIZE(msm_common_be_dai_links) +
 			 ARRAY_SIZE(msm_tavil_be_dai_links) +
 			 ARRAY_SIZE(msm_wcn_be_dai_links) +
@@ -5662,7 +5701,7 @@
 {
 	struct snd_soc_card *card = NULL;
 	struct snd_soc_dai_link *dailink;
-	int len_1, len_2, len_3;
+	int len_1, len_2, len_3, len_4;
 	int total_links;
 	const struct of_device_id *match;
 
@@ -5677,8 +5716,9 @@
 		card = &snd_soc_card_tavil_msm;
 		len_1 = ARRAY_SIZE(msm_common_dai_links);
 		len_2 = len_1 + ARRAY_SIZE(msm_tavil_fe_dai_links);
-		len_3 = len_2 + ARRAY_SIZE(msm_common_be_dai_links);
-		total_links = len_3 + ARRAY_SIZE(msm_tavil_be_dai_links);
+		len_3 = len_2 + ARRAY_SIZE(msm_common_misc_fe_dai_links);
+		len_4 = len_3 + ARRAY_SIZE(msm_common_be_dai_links);
+		total_links = len_4 + ARRAY_SIZE(msm_tavil_be_dai_links);
 		memcpy(msm_tavil_snd_card_dai_links,
 		       msm_common_dai_links,
 		       sizeof(msm_common_dai_links));
@@ -5686,9 +5726,12 @@
 		       msm_tavil_fe_dai_links,
 		       sizeof(msm_tavil_fe_dai_links));
 		memcpy(msm_tavil_snd_card_dai_links + len_2,
+		       msm_common_misc_fe_dai_links,
+		       sizeof(msm_common_misc_fe_dai_links));
+		memcpy(msm_tavil_snd_card_dai_links + len_3,
 		       msm_common_be_dai_links,
 		       sizeof(msm_common_be_dai_links));
-		memcpy(msm_tavil_snd_card_dai_links + len_3,
+		memcpy(msm_tavil_snd_card_dai_links + len_4,
 		       msm_tavil_be_dai_links,
 		       sizeof(msm_tavil_be_dai_links));
 
@@ -6186,14 +6229,19 @@
 			pdev->dev.of_node->full_name);
 		dev_dbg(&pdev->dev, "Jack type properties set to default");
 	} else {
-		if (!strcmp(mbhc_audio_jack_type, "4-pole-jack"))
+		if (!strcmp(mbhc_audio_jack_type, "4-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = false;
 			dev_dbg(&pdev->dev, "This hardware has 4 pole jack");
-		else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack"))
+		} else if (!strcmp(mbhc_audio_jack_type, "5-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = true;
 			dev_dbg(&pdev->dev, "This hardware has 5 pole jack");
-		else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack"))
+		} else if (!strcmp(mbhc_audio_jack_type, "6-pole-jack")) {
+			wcd_mbhc_cfg.enable_anc_mic_detect = true;
 			dev_dbg(&pdev->dev, "This hardware has 6 pole jack");
-		else
+		} else {
+			wcd_mbhc_cfg.enable_anc_mic_detect = false;
 			dev_dbg(&pdev->dev, "Unknown value, set to default");
+		}
 	}
 	/*
 	 * Parse US-Euro gpio info from DT. Report no error if us-euro